#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).
 * The exception to this is open_root_dentry(), which will open the
 * mount point by name.
 */
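/*
 * Editor's sketch, not from the original file: the two request shapes
 * described above, shown with the request fields this file itself
 * fills in later (r_inode, r_dentry, r_locked_dir).
 *
 *        req->r_inode = igrab(inode);            op on a specific ino
 *
 *        req->r_dentry = dget(dentry);           op on a base ino plus
 *        req->r_locked_dir = dir;                one path component
 */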

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

        if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
            ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                d_set_d_op(dentry, &ceph_dentry_ops);
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
        else
                d_set_d_op(dentry, &ceph_snap_dentry_ops);

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) {
                /* lost a race */
                kmem_cache_free(ceph_dentry_cachep, di);
                goto out_unlock;
        }
        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_fsdata = di;
        dentry->d_time = jiffies;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}
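/*
 * Editor's sketch: how the f_pos encoding round-trips, assuming
 * ceph_make_fpos() (used later in this file, defined in super.h) is
 * the inverse of the two helpers above -- frag in the high 32 bits,
 * offset in the low 32 bits:
 *
 *        loff_t pos = ceph_make_fpos(0xdeadbeef, 42);
 *        fpos_frag(pos) == 0xdeadbeef;
 *        fpos_off(pos) == 42;
 */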

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&parent->d_lock);

        /* start at beginning? */
        if (filp->f_pos == 2 || last == NULL ||
            filp->f_pos < ceph_dentry(last)->offset) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
                     d_unhashed(dentry) ? "!hashed" : "hashed",
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->at_end = 1;
                        goto out_unlock;
                }
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                spin_unlock(&dentry->d_lock);
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

        dget_dlock(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      dentry->d_inode->i_ino,
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
        }
        last = dentry;

        if (err < 0)
                goto out;

        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have parent lock */
        if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
                dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
                err = -EAGAIN;
                goto out;
        }

        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;

out_unlock:
        spin_unlock(&parent->d_lock);
out:
        if (last)
                dput(last);
        return err;
}
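/*
 * Editor's sketch, an assumption about code outside this file: the
 * walk above uses p->prev because the readdir prepopulate path (in
 * inode.c) moves each dentry to the head of parent->d_subdirs as MDS
 * results arrive, leaving the tail (d_subdirs.prev) as the first
 * entry in readdir order:
 *
 *        d_subdirs:  head -> dN -> ... -> d2 -> d1 -> tail
 *        readdir:    d1, d2, ..., dN  (walked via ->prev)
 */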

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->at_end)
                return 0;

        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = ci->i_release_count;

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            inode->i_ino, inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            filp->f_dentry->d_parent->d_inode->i_ino,
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }

        /* can we use the dcache? */
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&inode->i_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&inode->i_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = igrab(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
                req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate");
                        fi->dir_release_count--;    /* preclude I_COMPLETE */
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;

                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        if (ceph_frag_is_rightmost(frag))
                                fi->next_offset = 2;
                        else
                                fi->next_offset = 0;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                struct ceph_vino vino;
                ino_t ino;

                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                vino.ino = le64_to_cpu(in->ino);
                vino.snap = le64_to_cpu(in->snapid);
                ino = ceph_vino_to_ino(vino);
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos, ino, ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->at_end = 1;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                ci->i_ceph_flags |= CEPH_I_COMPLETE;
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}
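/*
 * Editor's sketch: the frag walk ceph_readdir() performs above,
 * flattened into pseudocode.  Each frag is fetched in chunks until
 * the MDS reports dir_end, then we advance until the rightmost frag
 * is exhausted:
 *
 *        frag = fpos_frag(filp->f_pos);
 *        for (;;) {
 *                fetch chunks of frag until dir_end;
 *                if (ceph_frag_is_rightmost(frag))
 *                        break;
 *                frag = ceph_frag_next(frag);
 *        }
 */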

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->next_offset = 2;  /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
                offset += inode->i_size + 2;   /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->at_end = 0;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* bump dir_release_count if we did a forward seek */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
        mutex_unlock(&inode->i_mutex);
        return retval;
}
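/*
 * Editor's note: an offset returned by telldir(3) on a ceph dir
 * encodes (frag, off), so a later seekdir(3) back to it lands in the
 * same frag chunk; seeking to a different frag, to 0, or before the
 * buffered chunk drops the cached readdir state via reset_readdir().
 * Illustrative userspace round trip:
 *
 *        long pos = telldir(dirp);
 *        ...
 *        seekdir(dirp, pos);        re-enters ceph_dir_llseek()
 */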

/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode;

        /* .snap dir? */
        if (err == -ENOENT &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }

        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);   /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
                strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  struct nameidata *nd)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* open (but not create!) intent? */
        if (nd &&
            (nd->flags & LOOKUP_OPEN) &&
            (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
            !(nd->intent.open.flags & O_CREAT)) {
                int mode = nd->intent.open.create_mode & ~current->fs->umask;
                return ceph_lookup_open(dir, dentry, nd, mode, 1);
        }

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&dir->i_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&dir->i_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&dir->i_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, NULL);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced).  Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
                       struct nameidata *nd)
{
        dout("create in dir %p dentry %p name '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        if (nd) {
                BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
                dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
                /* hrm, what should i do here if we get aliased? */
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
                return 0;
        }

        /* fall back to mknod */
        return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err)
                d_drop(dentry);
        else if (!req->r_reply_info.head->is_dentry)
                d_instantiate(dentry, igrab(old_dentry->d_inode));
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&inode->i_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&inode->i_lock);
        return drop;
}
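/*
 * Editor's note: the mask computed by drop_caps_for_unlink() is handed
 * to the MDS as the set of caps to release along with the operation,
 * exactly as the callers below do:
 *
 *        req->r_inode_drop = drop_caps_for_unlink(inode);
 */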

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above).  If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up d_subdirs order */
                ceph_i_clear(new_dir, CEPH_I_COMPLETE);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di && di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_cap_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_cap_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}
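/*
 * Editor's sketch, assuming lease_renew_after is set to roughly the
 * lease half-life when the lease is granted (outside this file): the
 * timeline the check above implements.
 *
 *        granted        lease_renew_after         d_time (expiry)
 *        |---------------------|----------------------|
 *         valid, no renew       valid + async RENEW    invalid
 */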

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&dir->i_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&dir->i_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct inode *dir;

        if (nd->flags & LOOKUP_RCU)
                return -ECHILD;

        dir = dentry->d_parent->d_inode;

        dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
             dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
             ceph_dentry(dentry)->offset);

        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
                goto out_touch;
        }
        if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
                goto out_touch;

        if (dentry_lease_is_valid(dentry) ||
            dir_lease_is_valid(dir, dentry))
                goto out_touch;

        dout("d_revalidate %p invalid\n", dentry);
        d_drop(dentry);
        return 0;
out_touch:
        ceph_dentry_lru_touch(dentry);
        return 1;
}

/*
 * When a dentry is released, clear the dir I_COMPLETE if it was part
 * of the current dir gen or if this is in the snapshot namespace.
 */
static void ceph_dentry_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct inode *parent_inode = NULL;
        u64 snapid = CEPH_NOSNAP;

        if (!IS_ROOT(dentry)) {
                parent_inode = dentry->d_parent->d_inode;
                if (parent_inode)
                        snapid = ceph_snap(parent_inode);
        }
        dout("dentry_release %p parent %p\n", dentry, parent_inode);
        if (parent_inode && snapid != CEPH_SNAPDIR) {
                struct ceph_inode_info *ci = ceph_inode(parent_inode);

                spin_lock(&parent_inode->i_lock);
                if (ci->i_shared_gen == di->lease_shared_gen ||
                    snapid <= CEPH_MAXSNAP) {
                        dout(" clearing %p complete (d_release)\n",
                             parent_inode);
                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
                        ci->i_release_count++;
                }
                spin_unlock(&parent_inode->i_lock);
        }
        if (di) {
                ceph_dentry_lru_del(dentry);
                if (di->lease_session)
                        ceph_put_mds_session(di->lease_session);
                kmem_cache_free(ceph_dentry_cachep, di);
                dentry->d_fsdata = NULL;
        }
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
                                     struct nameidata *nd)
{
        /*
         * Eventually, we'll want to revalidate snapped metadata
         * too... probably...
         */
        return 1;
}


/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;

        if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(1024, GFP_NOFS);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        sprintf(cf->dir_info,
                                "entries: %20lld\n"
                                " files: %20lld\n"
                                " subdirs: %20lld\n"
                                "rentries: %20lld\n"
                                " rfiles: %20lld\n"
                                " rsubdirs: %20lld\n"
                                "rbytes: %20lld\n"
                                "rctime: %10ld.%09ld\n",
                                ci->i_files + ci->i_subdirs,
                                ci->i_files,
                                ci->i_subdirs,
                                ci->i_rfiles + ci->i_rsubdirs,
                                ci->i_rfiles,
                                ci->i_rsubdirs,
                                ci->i_rbytes,
                                (long)ci->i_rctime.tv_sec,
                                (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}
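/*
 * Editor's note: with '-o dirstat' the stats above can be read from
 * userspace by reading the directory as if it were a regular file,
 * e.g. (mount source and paths illustrative):
 *
 *        # mount -t ceph -o dirstat mon:/ /mnt/ceph
 *        # cat /mnt/ceph/some/dir
 */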

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        dout("dir_fsync %p\n", inode);
        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_entry(head->prev,
                         struct ceph_mds_request, r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);
                dout("dir_fsync %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                if (req->r_timeout) {
                        ret = wait_for_completion_timeout(
                                &req->r_safe_completion, req->r_timeout);
                        if (ret > 0)
                                ret = 0;
                        else if (ret == 0)
                                ret = -EIO;  /* timed out */
                } else {
                        wait_for_completion(&req->r_safe_completion);
                }
                spin_lock(&ci->i_unsafe_lock);
                ceph_mdsc_put_request(req);

                if (ret || list_empty(head))
                        break;
                req = list_entry(head->next,
                                 struct ceph_mds_request, r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_add_tail(&di->lru, &mdsc->dentry_lru);
                mdsc->num_dentry++;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
             dn->d_name.len, dn->d_name.name, di->offset);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_move_tail(&di->lru, &mdsc->dentry_lru);
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        if (di) {
                mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
                spin_lock(&mdsc->dentry_lru_lock);
                list_del_init(&di->lru);
                mdsc->num_dentry--;
                spin_unlock(&mdsc->dentry_lru_lock);
        }
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct dentry *dn)
{
        struct inode *dir = dn->d_parent->d_inode;
        struct ceph_inode_info *dci = ceph_inode(dir);

        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
        case CEPH_STR_HASH_LINUX:
                return dn->d_name.hash;

        default:
                return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
        }
}

const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .readdir = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
        .d_release = ceph_dentry_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
        .d_release = ceph_dentry_release,
};