fs/ceph/dir.c
1#include "ceph_debug.h"
2
3#include <linux/spinlock.h>
4#include <linux/fs_struct.h>
5#include <linux/namei.h>
6#include <linux/slab.h>
7#include <linux/sched.h>
8
9#include "super.h"
10
11/*
12 * Directory operations: readdir, lookup, create, link, unlink,
13 * rename, etc.
14 */
15
16/*
17 * Ceph MDS operations are specified in terms of a base ino and
18 * relative path. Thus, the client can specify an operation on a
19 * specific inode (e.g., a getattr due to fstat(2)), or as a path
20 * relative to, say, the root directory.
21 *
22 * Normally, we limit ourselves to strict inode ops (no path component)
23 * or dentry operations (a single path component relative to an ino). The
24 * exception to this is open_root_dentry(), which will open the mount
25 * point by name.
26 */
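/*
 * Illustrative example of the above: an unlink of /a/b/c is normally
 * sent as a dentry op on the ino of /a/b with the single component
 * "c", while a getattr triggered by fstat(2) carries only the target
 * ino and no path at all.
 */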
27
28const struct inode_operations ceph_dir_iops;
29const struct file_operations ceph_dir_fops;
30struct dentry_operations ceph_dentry_ops;
31
32/*
33 * Initialize ceph dentry state.
34 */
35int ceph_init_dentry(struct dentry *dentry)
36{
37 struct ceph_dentry_info *di;
38
39 if (dentry->d_fsdata)
40 return 0;
41
42 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
43 dentry->d_op = &ceph_dentry_ops;
44 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
45 dentry->d_op = &ceph_snapdir_dentry_ops;
46 else
47 dentry->d_op = &ceph_snap_dentry_ops;
48
49 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
50 if (!di)
51 return -ENOMEM; /* oh well */
52
53 spin_lock(&dentry->d_lock);
54 if (dentry->d_fsdata) {
55 /* lost a race */
56 kmem_cache_free(ceph_dentry_cachep, di);
57 goto out_unlock;
58 }
59 di->dentry = dentry;
60 di->lease_session = NULL;
61 dentry->d_fsdata = di;
62 dentry->d_time = jiffies;
63 ceph_dentry_lru_add(dentry);
64out_unlock:
65 spin_unlock(&dentry->d_lock);
66 return 0;
67}
68
69
70
71/*
72 * for readdir, we encode the directory frag and offset within that
73 * frag into f_pos.
74 */
75static unsigned fpos_frag(loff_t p)
76{
77 return p >> 32;
78}
79static unsigned fpos_off(loff_t p)
80{
81 return p & 0xffffffff;
82}
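/*
 * Illustrative note, assuming ceph_make_fpos() (defined in super.h)
 * packs the frag into the high 32 bits and the offset into the low 32
 * bits: frag 0x12345678 at offset 7 becomes f_pos 0x1234567800000007,
 * and fpos_frag()/fpos_off() above simply undo that packing. f_pos
 * values 0 and 1 are reserved for "." and ".." by ceph_readdir() below.
 */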
83
84/*
85 * When possible, we try to satisfy a readdir by peeking at the
86 * dcache. We make this work by carefully ordering dentries on
87 * d_u.d_child when we initially get results back from the MDS, and
88 * falling back to a "normal" sync readdir if any dentries in the dir
89 * are dropped.
90 *
91 * I_COMPLETE indicates we have all dentries in the dir. It is
92 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
93 * the MDS if/when the directory is modified).
94 */
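/*
 * The scan below relies on that ordering: dentries sit on the parent's
 * d_subdirs list such that walking backwards from the tail (->prev)
 * visits them in increasing di->offset order, so the loop starts at
 * parent->d_subdirs.prev and keeps stepping p = p->prev.
 */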
95static int __dcache_readdir(struct file *filp,
96 void *dirent, filldir_t filldir)
97 __releases(inode->i_lock)
98 __acquires(inode->i_lock)
99{
100 struct inode *inode = filp->f_dentry->d_inode;
101 struct ceph_file_info *fi = filp->private_data;
102 struct dentry *parent = filp->f_dentry;
103 struct inode *dir = parent->d_inode;
104 struct list_head *p;
105 struct dentry *dentry, *last;
106 struct ceph_dentry_info *di;
107 int err = 0;
108
109 /* claim ref on last dentry we returned */
110 last = fi->dentry;
111 fi->dentry = NULL;
112
113 dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
114 last);
115
116 spin_lock(&dcache_lock);
117
118 /* start at beginning? */
119 if (filp->f_pos == 2 || (last &&
120 filp->f_pos < ceph_dentry(last)->offset)) {
121 if (list_empty(&parent->d_subdirs))
122 goto out_unlock;
123 p = parent->d_subdirs.prev;
124 dout(" initial p %p/%p\n", p->prev, p->next);
125 } else {
126 p = last->d_u.d_child.prev;
127 }
128
129more:
130 dentry = list_entry(p, struct dentry, d_u.d_child);
131 di = ceph_dentry(dentry);
132 while (1) {
133 dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
134 d_unhashed(dentry) ? "!hashed" : "hashed",
135 parent->d_subdirs.prev, parent->d_subdirs.next);
136 if (p == &parent->d_subdirs) {
137 fi->at_end = 1;
138 goto out_unlock;
139 }
140 if (!d_unhashed(dentry) && dentry->d_inode &&
141 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
142 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
143 filp->f_pos <= di->offset)
144 break;
145 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
146 dentry->d_name.len, dentry->d_name.name, di->offset,
147 filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
148 !dentry->d_inode ? " null" : "");
149 p = p->prev;
150 dentry = list_entry(p, struct dentry, d_u.d_child);
151 di = ceph_dentry(dentry);
152 }
153
154 atomic_inc(&dentry->d_count);
155 spin_unlock(&dcache_lock);
156 spin_unlock(&inode->i_lock);
157
158 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
159 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
160 filp->f_pos = di->offset;
161 err = filldir(dirent, dentry->d_name.name,
162 dentry->d_name.len, di->offset,
163 dentry->d_inode->i_ino,
164 dentry->d_inode->i_mode >> 12);
165
166 if (last) {
167 if (err < 0) {
168 /* remember our position */
169 fi->dentry = last;
170 fi->next_offset = di->offset;
171 } else {
172 dput(last);
173 }
174 last = NULL;
175 }
176
177 spin_lock(&inode->i_lock);
178 spin_lock(&dcache_lock);
179
180 last = dentry;
181
182 if (err < 0)
183 goto out_unlock;
184
185 p = p->prev;
186 filp->f_pos++;
187
188 /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
189 if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
190 goto more;
191 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
192 err = -EAGAIN;
193
194out_unlock:
195 spin_unlock(&dcache_lock);
196
197 if (last) {
198 spin_unlock(&inode->i_lock);
199 dput(last);
200 spin_lock(&inode->i_lock);
201 }
202
203 return err;
204}
205
206/*
207 * make note of the last dentry we read, so we can
208 * continue at the same lexicographical point,
209 * regardless of what dir changes take place on the
210 * server.
211 */
212static int note_last_dentry(struct ceph_file_info *fi, const char *name,
213 int len)
214{
215 kfree(fi->last_name);
216 fi->last_name = kmalloc(len+1, GFP_NOFS);
217 if (!fi->last_name)
218 return -ENOMEM;
219 memcpy(fi->last_name, name, len);
220 fi->last_name[len] = 0;
221 dout("note_last_dentry '%s'\n", fi->last_name);
222 return 0;
223}
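/*
 * The name saved here is handed back to the MDS as req->r_path2 by
 * ceph_readdir() below, so the server can resume the listing just
 * after that entry when we ask for the next chunk of the frag.
 */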
224
225static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
226{
227 struct ceph_file_info *fi = filp->private_data;
228 struct inode *inode = filp->f_dentry->d_inode;
229 struct ceph_inode_info *ci = ceph_inode(inode);
230 struct ceph_client *client = ceph_inode_to_client(inode);
231 struct ceph_mds_client *mdsc = &client->mdsc;
232 unsigned frag = fpos_frag(filp->f_pos);
233 int off = fpos_off(filp->f_pos);
234 int err;
235 u32 ftype;
236 struct ceph_mds_reply_info_parsed *rinfo;
237 const int max_entries = client->mount_args->max_readdir;
238 const int max_bytes = client->mount_args->max_readdir_bytes;
239
240 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
241 if (fi->at_end)
242 return 0;
243
244 /* always start with . and .. */
245 if (filp->f_pos == 0) {
246 /* note dir version at start of readdir so we can tell
247 * if any dentries get dropped */
248 fi->dir_release_count = ci->i_release_count;
249
250 dout("readdir off 0 -> '.'\n");
251 if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
252 inode->i_ino, inode->i_mode >> 12) < 0)
253 return 0;
254 filp->f_pos = 1;
255 off = 1;
256 }
257 if (filp->f_pos == 1) {
258 dout("readdir off 1 -> '..'\n");
259 if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
260 filp->f_dentry->d_parent->d_inode->i_ino,
261 inode->i_mode >> 12) < 0)
262 return 0;
263 filp->f_pos = 2;
264 off = 2;
265 }
266
267 /* can we use the dcache? */
268 spin_lock(&inode->i_lock);
269 if ((filp->f_pos == 2 || fi->dentry) &&
270 !ceph_test_opt(client, NOASYNCREADDIR) &&
271 ceph_snap(inode) != CEPH_SNAPDIR &&
272 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
273 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
274 err = __dcache_readdir(filp, dirent, filldir);
275 if (err != -EAGAIN) {
276 spin_unlock(&inode->i_lock);
277 return err;
278 }
279 }
280 spin_unlock(&inode->i_lock);
281 if (fi->dentry) {
282 err = note_last_dentry(fi, fi->dentry->d_name.name,
283 fi->dentry->d_name.len);
284 if (err)
285 return err;
286 dput(fi->dentry);
287 fi->dentry = NULL;
288 }
289
290 /* proceed with a normal readdir */
291
292more:
293 /* do we have the correct frag content buffered? */
294 if (fi->frag != frag || fi->last_readdir == NULL) {
295 struct ceph_mds_request *req;
296 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
297 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
298
299 /* discard old result, if any */
300 if (fi->last_readdir) {
301 ceph_mdsc_put_request(fi->last_readdir);
302 fi->last_readdir = NULL;
303 }
304
305 /* requery frag tree, as the frag topology may have changed */
306 frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
307
308 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
309 ceph_vinop(inode), frag, fi->last_name);
310 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
311 if (IS_ERR(req))
312 return PTR_ERR(req);
313 req->r_inode = igrab(inode);
314 req->r_dentry = dget(filp->f_dentry);
315 /* hints to request -> mds selection code */
316 req->r_direct_mode = USE_AUTH_MDS;
317 req->r_direct_hash = ceph_frag_value(frag);
318 req->r_direct_is_hash = true;
319 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
320 req->r_readdir_offset = fi->next_offset;
321 req->r_args.readdir.frag = cpu_to_le32(frag);
322 req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
323 req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
324 req->r_num_caps = max_entries + 1;
325 err = ceph_mdsc_do_request(mdsc, NULL, req);
326 if (err < 0) {
327 ceph_mdsc_put_request(req);
328 return err;
329 }
330 dout("readdir got and parsed readdir result=%d"
331 " on frag %x, end=%d, complete=%d\n", err, frag,
332 (int)req->r_reply_info.dir_end,
333 (int)req->r_reply_info.dir_complete);
334
335 if (!req->r_did_prepopulate) {
336 dout("readdir !did_prepopulate");
337 fi->dir_release_count--; /* preclude I_COMPLETE */
338 }
339
340 /* note next offset and last dentry name */
341 fi->offset = fi->next_offset;
342 fi->last_readdir = req;
343
344 if (req->r_reply_info.dir_end) {
345 kfree(fi->last_name);
346 fi->last_name = NULL;
347 fi->next_offset = 2;
348 } else {
349 rinfo = &req->r_reply_info;
350 err = note_last_dentry(fi,
351 rinfo->dir_dname[rinfo->dir_nr-1],
352 rinfo->dir_dname_len[rinfo->dir_nr-1]);
353 if (err)
354 return err;
355 fi->next_offset += rinfo->dir_nr;
356 }
357 }
358
359 rinfo = &fi->last_readdir->r_reply_info;
360 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
361 rinfo->dir_nr, off, fi->offset);
362 while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
363 u64 pos = ceph_make_fpos(frag, off);
364 struct ceph_mds_reply_inode *in =
365 rinfo->dir_in[off - fi->offset].in;
366 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
367 off, off - fi->offset, rinfo->dir_nr, pos,
368 rinfo->dir_dname_len[off - fi->offset],
369 rinfo->dir_dname[off - fi->offset], in);
370 BUG_ON(!in);
371 ftype = le32_to_cpu(in->mode) >> 12;
372 if (filldir(dirent,
373 rinfo->dir_dname[off - fi->offset],
374 rinfo->dir_dname_len[off - fi->offset],
375 pos,
376 le64_to_cpu(in->ino),
377 ftype) < 0) {
378 dout("filldir stopping us...\n");
379 return 0;
380 }
381 off++;
382 filp->f_pos = pos + 1;
383 }
384
385 if (fi->last_name) {
386 ceph_mdsc_put_request(fi->last_readdir);
387 fi->last_readdir = NULL;
388 goto more;
389 }
390
391 /* more frags? */
392 if (!ceph_frag_is_rightmost(frag)) {
393 frag = ceph_frag_next(frag);
394 off = 0;
395 filp->f_pos = ceph_make_fpos(frag, off);
396 dout("readdir next frag is %x\n", frag);
397 goto more;
398 }
399 fi->at_end = 1;
400
401 /*
402 * if dir_release_count still matches the dir, no dentries
403 * were released during the whole readdir, and we should have
404 * the complete dir contents in our cache.
405 */
406 spin_lock(&inode->i_lock);
407 if (ci->i_release_count == fi->dir_release_count) {
408 dout(" marking %p complete\n", inode);
409 ci->i_ceph_flags |= CEPH_I_COMPLETE;
410 ci->i_max_offset = filp->f_pos;
411 }
412 spin_unlock(&inode->i_lock);
413
414 dout("readdir %p filp %p done.\n", inode, filp);
415 return 0;
416}
417
418static void reset_readdir(struct ceph_file_info *fi)
419{
420 if (fi->last_readdir) {
421 ceph_mdsc_put_request(fi->last_readdir);
422 fi->last_readdir = NULL;
423 }
424 kfree(fi->last_name);
425 fi->next_offset = 2; /* compensate for . and .. */
426 if (fi->dentry) {
427 dput(fi->dentry);
428 fi->dentry = NULL;
429 }
430 fi->at_end = 0;
431}
432
433static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
434{
435 struct ceph_file_info *fi = file->private_data;
436 struct inode *inode = file->f_mapping->host;
437 loff_t old_offset = offset;
438 loff_t retval;
439
440 mutex_lock(&inode->i_mutex);
441 switch (origin) {
442 case SEEK_END:
443 offset += inode->i_size + 2; /* FIXME */
444 break;
445 case SEEK_CUR:
446 offset += file->f_pos;
447 }
448 retval = -EINVAL;
449 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
450 if (offset != file->f_pos) {
451 file->f_pos = offset;
452 file->f_version = 0;
453 fi->at_end = 0;
454 }
455 retval = offset;
456
457 /*
458 * discard buffered readdir content on seekdir(0), or
459 * seek to new frag, or seek prior to current chunk.
460 */
461 if (offset == 0 ||
462 fpos_frag(offset) != fpos_frag(old_offset) ||
463 fpos_off(offset) < fi->offset) {
464 dout("dir_llseek dropping %p content\n", file);
465 reset_readdir(fi);
466 }
467
468 /* preclude I_COMPLETE if we did a forward seek */
469 if (offset > old_offset)
470 fi->dir_release_count--;
471 }
472 mutex_unlock(&inode->i_mutex);
473 return retval;
474}
475
476/*
477 * Process result of a lookup/open request.
478 *
479 * Mainly, make sure we return the final req->r_dentry (if it already
480 * existed) in place of the original VFS-provided dentry when they
481 * differ.
482 *
483 * Gracefully handle the case where the MDS replies with -ENOENT and
484 * no trace (which it may do, at its discretion, e.g., if it doesn't
485 * care to issue a lease on the negative dentry).
486 */
487struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
488 struct dentry *dentry, int err)
489{
490 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb);
491 struct inode *parent = dentry->d_parent->d_inode;
492
493 /* .snap dir? */
494 if (err == -ENOENT &&
495 ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
496 strcmp(dentry->d_name.name,
497 client->mount_args->snapdir_name) == 0) {
498 struct inode *inode = ceph_get_snapdir(parent);
499 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
500 dentry, dentry->d_name.len, dentry->d_name.name, inode);
501 BUG_ON(!d_unhashed(dentry));
502 d_add(dentry, inode);
503 err = 0;
504 }
505
506 if (err == -ENOENT) {
507 /* no trace? */
508 err = 0;
509 if (!req->r_reply_info.head->is_dentry) {
510 dout("ENOENT and no trace, dentry %p inode %p\n",
511 dentry, dentry->d_inode);
512 if (dentry->d_inode) {
513 d_drop(dentry);
514 err = -ENOENT;
515 } else {
516 d_add(dentry, NULL);
517 }
518 }
519 }
520 if (err)
521 dentry = ERR_PTR(err);
522 else if (dentry != req->r_dentry)
523 dentry = dget(req->r_dentry); /* we got spliced */
524 else
525 dentry = NULL;
526 return dentry;
527}
528
529static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
530{
531 return ceph_ino(inode) == CEPH_INO_ROOT &&
532 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
533}
534
535/*
536 * Look up a single dir entry. If there is a lookup intent, inform
537 * the MDS so that it gets our 'caps wanted' value in a single op.
538 */
539static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
540 struct nameidata *nd)
541{
542 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
543 struct ceph_mds_client *mdsc = &client->mdsc;
544 struct ceph_mds_request *req;
545 int op;
546 int err;
547
548 dout("lookup %p dentry %p '%.*s'\n",
549 dir, dentry, dentry->d_name.len, dentry->d_name.name);
550
551 if (dentry->d_name.len > NAME_MAX)
552 return ERR_PTR(-ENAMETOOLONG);
553
554 err = ceph_init_dentry(dentry);
555 if (err < 0)
556 return ERR_PTR(err);
557
558 /* open (but not create!) intent? */
559 if (nd &&
560 (nd->flags & LOOKUP_OPEN) &&
561 (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
562 !(nd->intent.open.flags & O_CREAT)) {
563 int mode = nd->intent.open.create_mode & ~current->fs->umask;
564 return ceph_lookup_open(dir, dentry, nd, mode, 1);
565 }
566
567 /* can we conclude ENOENT locally? */
568 if (dentry->d_inode == NULL) {
569 struct ceph_inode_info *ci = ceph_inode(dir);
570 struct ceph_dentry_info *di = ceph_dentry(dentry);
571
572 spin_lock(&dir->i_lock);
573 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
574 if (strncmp(dentry->d_name.name,
575 client->mount_args->snapdir_name,
576 dentry->d_name.len) &&
577 !is_root_ceph_dentry(dir, dentry) &&
578 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
579 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
580 spin_unlock(&dir->i_lock);
581 dout(" dir %p complete, -ENOENT\n", dir);
582 d_add(dentry, NULL);
583 di->lease_shared_gen = ci->i_shared_gen;
584 return NULL;
585 }
586 spin_unlock(&dir->i_lock);
587 }
588
589 op = ceph_snap(dir) == CEPH_SNAPDIR ?
590 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
591 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
592 if (IS_ERR(req))
593 return ERR_CAST(req);
594 req->r_dentry = dget(dentry);
595 req->r_num_caps = 2;
596 /* we only need inode linkage */
597 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
598 req->r_locked_dir = dir;
599 err = ceph_mdsc_do_request(mdsc, NULL, req);
600 dentry = ceph_finish_lookup(req, dentry, err);
601 ceph_mdsc_put_request(req); /* will dput(dentry) */
602 dout("lookup result=%p\n", dentry);
603 return dentry;
604}
605
606/*
607 * If we do a create but get no trace back from the MDS, follow up with
608 * a lookup (the VFS expects us to link up the provided dentry).
609 */
610int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
611{
612 struct dentry *result = ceph_lookup(dir, dentry, NULL);
613
614 if (result && !IS_ERR(result)) {
615 /*
616 * We created the item, then did a lookup, and found
617 * it was already linked to another inode we already
618 * had in our cache (and thus got spliced). Link our
619 * dentry to that inode, but don't hash it, just in
620 * case the VFS wants to dereference it.
621 */
622 BUG_ON(!result->d_inode);
623 d_instantiate(dentry, result->d_inode);
624 return 0;
625 }
626 return PTR_ERR(result);
627}
628
629static int ceph_mknod(struct inode *dir, struct dentry *dentry,
630 int mode, dev_t rdev)
631{
632 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
633 struct ceph_mds_client *mdsc = &client->mdsc;
634 struct ceph_mds_request *req;
635 int err;
636
637 if (ceph_snap(dir) != CEPH_NOSNAP)
638 return -EROFS;
639
640 dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
641 dir, dentry, mode, rdev);
642 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
643 if (IS_ERR(req)) {
644 d_drop(dentry);
645 return PTR_ERR(req);
646 }
647 req->r_dentry = dget(dentry);
648 req->r_num_caps = 2;
649 req->r_locked_dir = dir;
650 req->r_args.mknod.mode = cpu_to_le32(mode);
651 req->r_args.mknod.rdev = cpu_to_le32(rdev);
652 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
653 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
654 err = ceph_mdsc_do_request(mdsc, dir, req);
655 if (!err && !req->r_reply_info.head->is_dentry)
656 err = ceph_handle_notrace_create(dir, dentry);
657 ceph_mdsc_put_request(req);
658 if (err)
659 d_drop(dentry);
660 return err;
661}
662
663static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
664 struct nameidata *nd)
665{
666 dout("create in dir %p dentry %p name '%.*s'\n",
667 dir, dentry, dentry->d_name.len, dentry->d_name.name);
668
669 if (ceph_snap(dir) != CEPH_NOSNAP)
670 return -EROFS;
671
672 if (nd) {
673 BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
674 dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
675 /* hrm, what should i do here if we get aliased? */
676 if (IS_ERR(dentry))
677 return PTR_ERR(dentry);
678 return 0;
679 }
680
681 /* fall back to mknod */
682 return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
683}
684
685static int ceph_symlink(struct inode *dir, struct dentry *dentry,
686 const char *dest)
687{
688 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
689 struct ceph_mds_client *mdsc = &client->mdsc;
690 struct ceph_mds_request *req;
691 int err;
692
693 if (ceph_snap(dir) != CEPH_NOSNAP)
694 return -EROFS;
695
696 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
697 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
698 if (IS_ERR(req)) {
699 d_drop(dentry);
700 return PTR_ERR(req);
701 }
702 req->r_dentry = dget(dentry);
703 req->r_num_caps = 2;
704 req->r_path2 = kstrdup(dest, GFP_NOFS);
705 req->r_locked_dir = dir;
706 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
707 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
708 err = ceph_mdsc_do_request(mdsc, dir, req);
709 if (!err && !req->r_reply_info.head->is_dentry)
710 err = ceph_handle_notrace_create(dir, dentry);
711 ceph_mdsc_put_request(req);
712 if (err)
713 d_drop(dentry);
714 return err;
715}
716
717static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
718{
719 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
720 struct ceph_mds_client *mdsc = &client->mdsc;
721 struct ceph_mds_request *req;
722 int err = -EROFS;
723 int op;
724
725 if (ceph_snap(dir) == CEPH_SNAPDIR) {
726 /* mkdir .snap/foo is a MKSNAP */
727 op = CEPH_MDS_OP_MKSNAP;
728 dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
729 dentry->d_name.len, dentry->d_name.name, dentry);
730 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
731 dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
732 op = CEPH_MDS_OP_MKDIR;
733 } else {
734 goto out;
735 }
736 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
737 if (IS_ERR(req)) {
738 err = PTR_ERR(req);
739 goto out;
740 }
741
742 req->r_dentry = dget(dentry);
743 req->r_num_caps = 2;
744 req->r_locked_dir = dir;
745 req->r_args.mkdir.mode = cpu_to_le32(mode);
746 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
747 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
748 err = ceph_mdsc_do_request(mdsc, dir, req);
749 if (!err && !req->r_reply_info.head->is_dentry)
750 err = ceph_handle_notrace_create(dir, dentry);
751 ceph_mdsc_put_request(req);
752out:
753 if (err < 0)
754 d_drop(dentry);
755 return err;
756}
757
758static int ceph_link(struct dentry *old_dentry, struct inode *dir,
759 struct dentry *dentry)
760{
761 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
762 struct ceph_mds_client *mdsc = &client->mdsc;
763 struct ceph_mds_request *req;
764 int err;
765
766 if (ceph_snap(dir) != CEPH_NOSNAP)
767 return -EROFS;
768
769 dout("link in dir %p old_dentry %p dentry %p\n", dir,
770 old_dentry, dentry);
771 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
772 if (IS_ERR(req)) {
773 d_drop(dentry);
774 return PTR_ERR(req);
775 }
776 req->r_dentry = dget(dentry);
777 req->r_num_caps = 2;
778 req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
779 req->r_locked_dir = dir;
780 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
781 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
782 err = ceph_mdsc_do_request(mdsc, dir, req);
783 if (err)
784 d_drop(dentry);
785 else if (!req->r_reply_info.head->is_dentry)
786 d_instantiate(dentry, igrab(old_dentry->d_inode));
787 ceph_mdsc_put_request(req);
788 return err;
789}
790
791/*
792 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
793 * looks like the link count will hit 0, drop any other caps (other
794 * than PIN) we don't specifically want (due to the file still being
795 * open).
796 */
797static int drop_caps_for_unlink(struct inode *inode)
798{
799 struct ceph_inode_info *ci = ceph_inode(inode);
800 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
801
802 spin_lock(&inode->i_lock);
803 if (inode->i_nlink == 1) {
804 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
805 ci->i_ceph_flags |= CEPH_I_NODELAY;
806 }
807 spin_unlock(&inode->i_lock);
808 return drop;
809}
810
811/*
812 * rmdir and unlink differ only by the metadata op code
813 */
814static int ceph_unlink(struct inode *dir, struct dentry *dentry)
815{
816 struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
817 struct ceph_mds_client *mdsc = &client->mdsc;
818 struct inode *inode = dentry->d_inode;
819 struct ceph_mds_request *req;
820 int err = -EROFS;
821 int op;
822
823 if (ceph_snap(dir) == CEPH_SNAPDIR) {
824 /* rmdir .snap/foo is RMSNAP */
825 dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
826 dentry->d_name.name, dentry);
827 op = CEPH_MDS_OP_RMSNAP;
828 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
829 dout("unlink/rmdir dir %p dn %p inode %p\n",
830 dir, dentry, inode);
831 op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
832 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
833 } else
834 goto out;
835 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
836 if (IS_ERR(req)) {
837 err = PTR_ERR(req);
838 goto out;
839 }
840 req->r_dentry = dget(dentry);
841 req->r_num_caps = 2;
842 req->r_locked_dir = dir;
843 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
844 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
845 req->r_inode_drop = drop_caps_for_unlink(inode);
846 err = ceph_mdsc_do_request(mdsc, dir, req);
847 if (!err && !req->r_reply_info.head->is_dentry)
848 d_delete(dentry);
849 ceph_mdsc_put_request(req);
850out:
851 return err;
852}
853
854static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
855 struct inode *new_dir, struct dentry *new_dentry)
856{
857 struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
858 struct ceph_mds_client *mdsc = &client->mdsc;
859 struct ceph_mds_request *req;
860 int err;
861
862 if (ceph_snap(old_dir) != ceph_snap(new_dir))
863 return -EXDEV;
864 if (ceph_snap(old_dir) != CEPH_NOSNAP ||
865 ceph_snap(new_dir) != CEPH_NOSNAP)
866 return -EROFS;
867 dout("rename dir %p dentry %p to dir %p dentry %p\n",
868 old_dir, old_dentry, new_dir, new_dentry);
869 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
870 if (IS_ERR(req))
871 return PTR_ERR(req);
872 req->r_dentry = dget(new_dentry);
873 req->r_num_caps = 2;
874 req->r_old_dentry = dget(old_dentry);
875 req->r_locked_dir = new_dir;
876 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
877 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
878 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
879 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
880 /* release LINK_RDCACHE on source inode (mds will lock it) */
881 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
882 if (new_dentry->d_inode)
883 req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
884 err = ceph_mdsc_do_request(mdsc, old_dir, req);
885 if (!err && !req->r_reply_info.head->is_dentry) {
886 /*
887 * Normally d_move() is done by fill_trace (called by
888 * do_request, above). If there is no trace, we need
889 * to do it here.
890 */
891
892 /* d_move screws up d_subdirs order */
893 ceph_i_clear(new_dir, CEPH_I_COMPLETE);
894
895 d_move(old_dentry, new_dentry);
896
897 /* ensure target dentry is invalidated, despite
898 rehashing bug in vfs_rename_dir */
899 ceph_invalidate_dentry_lease(new_dentry);
900 }
901 ceph_mdsc_put_request(req);
902 return err;
903}
904
905/*
906 * Ensure a dentry lease will no longer revalidate.
907 */
908void ceph_invalidate_dentry_lease(struct dentry *dentry)
909{
910 spin_lock(&dentry->d_lock);
911 dentry->d_time = jiffies;
912 ceph_dentry(dentry)->lease_shared_gen = 0;
913 spin_unlock(&dentry->d_lock);
914}
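/*
 * Setting d_time to the current jiffies makes the
 * time_before(jiffies, dentry->d_time) test in dentry_lease_is_valid()
 * fail, and zeroing lease_shared_gen normally breaks the gen
 * comparison in dir_lease_is_valid() as well, so the next
 * d_revalidate falls through to a fresh MDS lookup.
 */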
915
916/*
917 * Check if dentry lease is valid. If not, delete the lease. Try to
918 * renew if more than half of the lease period has elapsed.
919 */
920static int dentry_lease_is_valid(struct dentry *dentry)
921{
922 struct ceph_dentry_info *di;
923 struct ceph_mds_session *s;
924 int valid = 0;
925 u32 gen;
926 unsigned long ttl;
927 struct ceph_mds_session *session = NULL;
928 struct inode *dir = NULL;
929 u32 seq = 0;
930
931 spin_lock(&dentry->d_lock);
932 di = ceph_dentry(dentry);
933 if (di && di->lease_session) {
934 s = di->lease_session;
935 spin_lock(&s->s_cap_lock);
936 gen = s->s_cap_gen;
937 ttl = s->s_cap_ttl;
938 spin_unlock(&s->s_cap_lock);
939
940 if (di->lease_gen == gen &&
941 time_before(jiffies, dentry->d_time) &&
942 time_before(jiffies, ttl)) {
943 valid = 1;
944 if (di->lease_renew_after &&
945 time_after(jiffies, di->lease_renew_after)) {
946 /* we should renew */
947 dir = dentry->d_parent->d_inode;
948 session = ceph_get_mds_session(s);
949 seq = di->lease_seq;
950 di->lease_renew_after = 0;
951 di->lease_renew_from = jiffies;
952 }
953 }
954 }
955 spin_unlock(&dentry->d_lock);
956
957 if (session) {
958 ceph_mdsc_lease_send_msg(session, dir, dentry,
959 CEPH_MDS_LEASE_RENEW, seq);
960 ceph_put_mds_session(session);
961 }
962 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
963 return valid;
964}
965
966/*
967 * Check if directory-wide content lease/cap is valid.
968 */
969static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
970{
971 struct ceph_inode_info *ci = ceph_inode(dir);
972 struct ceph_dentry_info *di = ceph_dentry(dentry);
973 int valid = 0;
974
975 spin_lock(&dir->i_lock);
976 if (ci->i_shared_gen == di->lease_shared_gen)
977 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
978 spin_unlock(&dir->i_lock);
979 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
980 dir, (unsigned)ci->i_shared_gen, dentry,
981 (unsigned)di->lease_shared_gen, valid);
982 return valid;
983}
984
985/*
986 * Check if cached dentry can be trusted.
987 */
988static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
989{
990 struct inode *dir = dentry->d_parent->d_inode;
991
992 dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
993 dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
994 ceph_dentry(dentry)->offset);
995
996 /* always trust cached snapped dentries, snapdir dentry */
997 if (ceph_snap(dir) != CEPH_NOSNAP) {
998 dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
999 dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
1000 goto out_touch;
1001 }
1002 if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
1003 goto out_touch;
1004
1005 if (dentry_lease_is_valid(dentry) ||
1006 dir_lease_is_valid(dir, dentry))
1007 goto out_touch;
1008
1009 dout("d_revalidate %p invalid\n", dentry);
1010 d_drop(dentry);
1011 return 0;
1012out_touch:
1013 ceph_dentry_lru_touch(dentry);
1014 return 1;
1015}
1016
1017/*
1018 * When a dentry is released, clear the dir I_COMPLETE if it was part
1019 * of the current dir gen or if this is in the snapshot namespace.
1020 */
1021static void ceph_dentry_release(struct dentry *dentry)
1022{
1023 struct ceph_dentry_info *di = ceph_dentry(dentry);
1024 struct inode *parent_inode = dentry->d_parent->d_inode;
1025 u64 snapid = ceph_snap(parent_inode);
1026
1027 dout("dentry_release %p parent %p\n", dentry, parent_inode);
1028
1029 if (parent_inode && snapid != CEPH_SNAPDIR) {
1030 struct ceph_inode_info *ci = ceph_inode(parent_inode);
1031
1032 spin_lock(&parent_inode->i_lock);
1033 if (ci->i_shared_gen == di->lease_shared_gen ||
1034 snapid <= CEPH_MAXSNAP) {
1035 dout(" clearing %p complete (d_release)\n",
1036 parent_inode);
1037 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
1038 ci->i_release_count++;
1039 }
1040 spin_unlock(&parent_inode->i_lock);
1041 }
1042 if (di) {
1043 ceph_dentry_lru_del(dentry);
1044 if (di->lease_session)
1045 ceph_put_mds_session(di->lease_session);
1046 kmem_cache_free(ceph_dentry_cachep, di);
1047 dentry->d_fsdata = NULL;
1048 }
1049}
1050
1051static int ceph_snapdir_d_revalidate(struct dentry *dentry,
1052 struct nameidata *nd)
1053{
1054 /*
1055 * Eventually, we'll want to revalidate snapped metadata
1056 * too... probably...
1057 */
1058 return 1;
1059}
1060
1061
1062
1063/*
1064 * read() on a dir. This weird interface hack only works if mounted
1065 * with '-o dirstat'.
1066 */
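/*
 * Illustrative usage (assuming a mount with -o dirstat and a
 * hypothetical mount point /mnt/ceph):
 *   $ cat /mnt/ceph/some/dir
 * prints the "entries:/files:/subdirs:/..." summary formatted below
 * instead of failing with EISDIR.
 */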
1067static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1068 loff_t *ppos)
1069{
1070 struct ceph_file_info *cf = file->private_data;
1071 struct inode *inode = file->f_dentry->d_inode;
1072 struct ceph_inode_info *ci = ceph_inode(inode);
1073 int left;
1074
1075 if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1076 return -EISDIR;
1077
1078 if (!cf->dir_info) {
1079 cf->dir_info = kmalloc(1024, GFP_NOFS);
1080 if (!cf->dir_info)
1081 return -ENOMEM;
1082 cf->dir_info_len =
1083 sprintf(cf->dir_info,
1084 "entries: %20lld\n"
1085 " files: %20lld\n"
1086 " subdirs: %20lld\n"
1087 "rentries: %20lld\n"
1088 " rfiles: %20lld\n"
1089 " rsubdirs: %20lld\n"
1090 "rbytes: %20lld\n"
1091 "rctime: %10ld.%09ld\n",
1092 ci->i_files + ci->i_subdirs,
1093 ci->i_files,
1094 ci->i_subdirs,
1095 ci->i_rfiles + ci->i_rsubdirs,
1096 ci->i_rfiles,
1097 ci->i_rsubdirs,
1098 ci->i_rbytes,
1099 (long)ci->i_rctime.tv_sec,
1100 (long)ci->i_rctime.tv_nsec);
1101 }
1102
1103 if (*ppos >= cf->dir_info_len)
1104 return 0;
1105 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1106 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1107 if (left == size)
1108 return -EFAULT;
1109 *ppos += (size - left);
1110 return size - left;
1111}
1112
1113/*
1114 * an fsync() on a dir will wait for any uncommitted directory
1115 * operations to commit.
1116 */
1117static int ceph_dir_fsync(struct file *file, int datasync)
1118{
1119 struct inode *inode = file->f_path.dentry->d_inode;
1120 struct ceph_inode_info *ci = ceph_inode(inode);
1121 struct list_head *head = &ci->i_unsafe_dirops;
1122 struct ceph_mds_request *req;
1123 u64 last_tid;
1124 int ret = 0;
1125
1126 dout("dir_fsync %p\n", inode);
1127 spin_lock(&ci->i_unsafe_lock);
1128 if (list_empty(head))
1129 goto out;
1130
1131 req = list_entry(head->prev,
1132 struct ceph_mds_request, r_unsafe_dir_item);
1133 last_tid = req->r_tid;
1134
1135 do {
1136 ceph_mdsc_get_request(req);
1137 spin_unlock(&ci->i_unsafe_lock);
1138 dout("dir_fsync %p wait on tid %llu (until %llu)\n",
1139 inode, req->r_tid, last_tid);
1140 if (req->r_timeout) {
1141 ret = wait_for_completion_timeout(
1142 &req->r_safe_completion, req->r_timeout);
1143 if (ret > 0)
1144 ret = 0;
1145 else if (ret == 0)
1146 ret = -EIO; /* timed out */
1147 } else {
1148 wait_for_completion(&req->r_safe_completion);
1149 }
1150 spin_lock(&ci->i_unsafe_lock);
1151 ceph_mdsc_put_request(req);
1152
1153 if (ret || list_empty(head))
1154 break;
1155 req = list_entry(head->next,
1156 struct ceph_mds_request, r_unsafe_dir_item);
1157 } while (req->r_tid < last_tid);
1158out:
1159 spin_unlock(&ci->i_unsafe_lock);
1160 return ret;
1161}
1162
1163/*
1164 * We maintain a private dentry LRU.
1165 *
1166 * FIXME: this needs to be changed to a per-mds lru to be useful.
1167 */
1168void ceph_dentry_lru_add(struct dentry *dn)
1169{
1170 struct ceph_dentry_info *di = ceph_dentry(dn);
1171 struct ceph_mds_client *mdsc;
1172
1173 dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
1174 dn->d_name.len, dn->d_name.name);
1175 if (di) {
1176 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
1177 spin_lock(&mdsc->dentry_lru_lock);
1178 list_add_tail(&di->lru, &mdsc->dentry_lru);
1179 mdsc->num_dentry++;
1180 spin_unlock(&mdsc->dentry_lru_lock);
1181 }
1182}
1183
1184void ceph_dentry_lru_touch(struct dentry *dn)
1185{
1186 struct ceph_dentry_info *di = ceph_dentry(dn);
1187 struct ceph_mds_client *mdsc;
1188
1189 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
1190 dn->d_name.len, dn->d_name.name, di->offset);
1191 if (di) {
1192 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
1193 spin_lock(&mdsc->dentry_lru_lock);
1194 list_move_tail(&di->lru, &mdsc->dentry_lru);
1195 spin_unlock(&mdsc->dentry_lru_lock);
1196 }
1197}
1198
1199void ceph_dentry_lru_del(struct dentry *dn)
1200{
1201 struct ceph_dentry_info *di = ceph_dentry(dn);
1202 struct ceph_mds_client *mdsc;
1203
1204 dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
1205 dn->d_name.len, dn->d_name.name);
1206 if (di) {
1207 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc;
1208 spin_lock(&mdsc->dentry_lru_lock);
1209 list_del_init(&di->lru);
1210 mdsc->num_dentry--;
1211 spin_unlock(&mdsc->dentry_lru_lock);
1212 }
1213}
1214
1215const struct file_operations ceph_dir_fops = {
1216 .read = ceph_read_dir,
1217 .readdir = ceph_readdir,
1218 .llseek = ceph_dir_llseek,
1219 .open = ceph_open,
1220 .release = ceph_release,
1221 .unlocked_ioctl = ceph_ioctl,
1222 .fsync = ceph_dir_fsync,
1223};
1224
1225const struct inode_operations ceph_dir_iops = {
1226 .lookup = ceph_lookup,
1227 .permission = ceph_permission,
1228 .getattr = ceph_getattr,
1229 .setattr = ceph_setattr,
1230 .setxattr = ceph_setxattr,
1231 .getxattr = ceph_getxattr,
1232 .listxattr = ceph_listxattr,
1233 .removexattr = ceph_removexattr,
1234 .mknod = ceph_mknod,
1235 .symlink = ceph_symlink,
1236 .mkdir = ceph_mkdir,
1237 .link = ceph_link,
1238 .unlink = ceph_unlink,
1239 .rmdir = ceph_unlink,
1240 .rename = ceph_rename,
1241 .create = ceph_create,
1242};
1243
1244struct dentry_operations ceph_dentry_ops = {
1245 .d_revalidate = ceph_d_revalidate,
1246 .d_release = ceph_dentry_release,
1247};
1248
1249struct dentry_operations ceph_snapdir_dentry_ops = {
1250 .d_revalidate = ceph_snapdir_d_revalidate,
1251 .d_release = ceph_dentry_release,
1252};
1253
1254struct dentry_operations ceph_snap_dentry_ops = {
1255 .d_release = ceph_dentry_release,
1256};