fs/ceph/inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};

102
103/*
104 * We use a 'frag tree' to keep track of the MDS's directory fragments
105 * for a given inode (usually there is just a single fragment). We
106 * need to know when a child frag is delegated to a new MDS, or when
107 * it is flagged as replicated, so we can direct our requests
108 * accordingly.
109 */
110
111/*
112 * find/create a frag in the tree
113 */
114static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
115 u32 f)
116{
117 struct rb_node **p;
118 struct rb_node *parent = NULL;
119 struct ceph_inode_frag *frag;
120 int c;
121
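	/* standard rbtree descent keyed by frag id: stop on a match,
	 * otherwise remember the link slot for the new node */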
122 p = &ci->i_fragtree.rb_node;
123 while (*p) {
124 parent = *p;
125 frag = rb_entry(parent, struct ceph_inode_frag, node);
126 c = ceph_frag_compare(f, frag->frag);
127 if (c < 0)
128 p = &(*p)->rb_left;
129 else if (c > 0)
130 p = &(*p)->rb_right;
131 else
132 return frag;
133 }
134
135 frag = kmalloc(sizeof(*frag), GFP_NOFS);
136 if (!frag) {
137 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
138 "frag %x\n", &ci->vfs_inode,
139 ceph_vinop(&ci->vfs_inode), f);
140 return ERR_PTR(-ENOMEM);
141 }
142 frag->frag = f;
143 frag->split_by = 0;
144 frag->mds = -1;
145 frag->ndist = 0;
146
147 rb_link_node(&frag->node, parent, p);
148 rb_insert_color(&frag->node, &ci->i_fragtree);
149
150 dout("get_or_create_frag added %llx.%llx frag %x\n",
151 ceph_vinop(&ci->vfs_inode), f);
152 return frag;
153}
154
155/*
156 * find a specific frag @f
157 */
158struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
159{
160 struct rb_node *n = ci->i_fragtree.rb_node;
161
162 while (n) {
163 struct ceph_inode_frag *frag =
164 rb_entry(n, struct ceph_inode_frag, node);
165 int c = ceph_frag_compare(f, frag->frag);
166 if (c < 0)
167 n = n->rb_left;
168 else if (c > 0)
169 n = n->rb_right;
170 else
171 return frag;
172 }
173 return NULL;
174}
175
176/*
177 * Choose frag containing the given value @v. If @pfrag is
178 * specified, copy the frag delegation info to the caller if
179 * it is present.
180 */
3e7fbe9c
YZ
181static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
182 struct ceph_inode_frag *pfrag, int *found)
355da1eb
SW
183{
184 u32 t = ceph_frag_make(0, 0);
185 struct ceph_inode_frag *frag;
186 unsigned nway, i;
187 u32 n;
188
189 if (found)
190 *found = 0;
191
355da1eb
SW
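	/* walk from the root fragment toward the leaf containing @v,
	 * descending through each recorded split until an unsplit frag
	 * is reached */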
192 while (1) {
193 WARN_ON(!ceph_frag_contains_value(t, v));
194 frag = __ceph_find_frag(ci, t);
195 if (!frag)
196 break; /* t is a leaf */
197 if (frag->split_by == 0) {
198 if (pfrag)
199 memcpy(pfrag, frag, sizeof(*pfrag));
200 if (found)
201 *found = 1;
202 break;
203 }
204
205 /* choose child */
206 nway = 1 << frag->split_by;
207 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
208 frag->split_by, nway);
209 for (i = 0; i < nway; i++) {
210 n = ceph_frag_make_child(t, frag->split_by, i);
211 if (ceph_frag_contains_value(n, v)) {
212 t = n;
213 break;
214 }
215 }
216 BUG_ON(i == nway);
217 }
218 dout("choose_frag(%x) = %x\n", v, t);
219
355da1eb
SW
220 return t;
221}
222
3e7fbe9c
YZ
223u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
224 struct ceph_inode_frag *pfrag, int *found)
225{
226 u32 ret;
227 mutex_lock(&ci->i_fragtree_mutex);
228 ret = __ceph_choose_frag(ci, v, pfrag, found);
229 mutex_unlock(&ci->i_fragtree_mutex);
230 return ret;
231}
232
355da1eb
SW
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
238static int ceph_fill_dirfrag(struct inode *inode,
239 struct ceph_mds_reply_dirfrag *dirinfo)
240{
241 struct ceph_inode_info *ci = ceph_inode(inode);
242 struct ceph_inode_frag *frag;
243 u32 id = le32_to_cpu(dirinfo->frag);
244 int mds = le32_to_cpu(dirinfo->auth);
245 int ndist = le32_to_cpu(dirinfo->ndist);
8d08503c 246 int diri_auth = -1;
355da1eb
SW
247 int i;
248 int err = 0;
249
8d08503c
YZ
250 spin_lock(&ci->i_ceph_lock);
251 if (ci->i_auth_cap)
252 diri_auth = ci->i_auth_cap->mds;
253 spin_unlock(&ci->i_ceph_lock);
254
42172119
YZ
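	/* an auth of -1 (CDIR_AUTH_PARENT) names no MDS for this frag,
	 * so fall back to the directory inode's auth MDS */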
255 if (mds == -1) /* CDIR_AUTH_PARENT */
256 mds = diri_auth;
257
355da1eb 258 mutex_lock(&ci->i_fragtree_mutex);
8d08503c 259 if (ndist == 0 && mds == diri_auth) {
355da1eb
SW
260 /* no delegation info needed. */
261 frag = __ceph_find_frag(ci, id);
262 if (!frag)
263 goto out;
264 if (frag->split_by == 0) {
265 /* tree leaf, remove */
266 dout("fill_dirfrag removed %llx.%llx frag %x"
267 " (no ref)\n", ceph_vinop(inode), id);
268 rb_erase(&frag->node, &ci->i_fragtree);
269 kfree(frag);
270 } else {
271 /* tree branch, keep and clear */
272 dout("fill_dirfrag cleared %llx.%llx frag %x"
273 " referral\n", ceph_vinop(inode), id);
274 frag->mds = -1;
275 frag->ndist = 0;
276 }
277 goto out;
278 }
279
280
281 /* find/add this frag to store mds delegation info */
282 frag = __get_or_create_frag(ci, id);
283 if (IS_ERR(frag)) {
284 /* this is not the end of the world; we can continue
285 with bad/inaccurate delegation info */
286 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
287 ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
288 err = -ENOMEM;
289 goto out;
290 }
291
292 frag->mds = mds;
293 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
294 for (i = 0; i < frag->ndist; i++)
295 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
296 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
297 ceph_vinop(inode), frag->frag, frag->ndist);
298
299out:
300 mutex_unlock(&ci->i_fragtree_mutex);
301 return err;
302}
303
a407846e
YZ
304static int frag_tree_split_cmp(const void *l, const void *r)
305{
306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
fe2ed425
JL
308 return ceph_frag_compare(le32_to_cpu(ls->frag),
309 le32_to_cpu(rs->frag));
a407846e
YZ
310}
311
a4b7431f
YZ
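/* return true if @f is a direct child of @frag (or is the root frag
 * when @frag is NULL) */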
312static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
313{
314 if (!frag)
315 return f == ceph_frag_make(0, 0);
316 if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
317 return false;
318 return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
319}
320
3e7fbe9c
YZ
321static int ceph_fill_fragtree(struct inode *inode,
322 struct ceph_frag_tree_head *fragtree,
323 struct ceph_mds_reply_dirfrag *dirinfo)
324{
325 struct ceph_inode_info *ci = ceph_inode(inode);
a4b7431f 326 struct ceph_inode_frag *frag, *prev_frag = NULL;
3e7fbe9c 327 struct rb_node *rb_node;
1b1bc16d
YZ
328 unsigned i, split_by, nsplits;
329 u32 id;
3e7fbe9c
YZ
330 bool update = false;
331
332 mutex_lock(&ci->i_fragtree_mutex);
333 nsplits = le32_to_cpu(fragtree->nsplits);
1b1bc16d
YZ
334 if (nsplits != ci->i_fragtree_nsplits) {
335 update = true;
336 } else if (nsplits) {
3e7fbe9c
YZ
337 i = prandom_u32() % nsplits;
338 id = le32_to_cpu(fragtree->splits[i].frag);
339 if (!__ceph_find_frag(ci, id))
340 update = true;
341 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
342 rb_node = rb_first(&ci->i_fragtree);
343 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
344 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
345 update = true;
346 }
347 if (!update && dirinfo) {
348 id = le32_to_cpu(dirinfo->frag);
349 if (id != __ceph_choose_frag(ci, id, NULL, NULL))
350 update = true;
351 }
352 if (!update)
353 goto out_unlock;
354
a407846e
YZ
355 if (nsplits > 1) {
356 sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
357 frag_tree_split_cmp, NULL);
358 }
359
3e7fbe9c
YZ
360 dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
361 rb_node = rb_first(&ci->i_fragtree);
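	/* merge the (now sorted) split list from the MDS with the existing
	 * rbtree: keep splits that still exist, prune stale nodes, and
	 * create any missing ones */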
362 for (i = 0; i < nsplits; i++) {
363 id = le32_to_cpu(fragtree->splits[i].frag);
1b1bc16d
YZ
364 split_by = le32_to_cpu(fragtree->splits[i].by);
365 if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
366 pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
367 "frag %x split by %d\n", ceph_vinop(inode),
368 i, nsplits, id, split_by);
369 continue;
370 }
3e7fbe9c
YZ
371 frag = NULL;
372 while (rb_node) {
373 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
374 if (ceph_frag_compare(frag->frag, id) >= 0) {
375 if (frag->frag != id)
376 frag = NULL;
377 else
378 rb_node = rb_next(rb_node);
379 break;
380 }
381 rb_node = rb_next(rb_node);
a4b7431f
YZ
382 /* delete stale split/leaf node */
383 if (frag->split_by > 0 ||
384 !is_frag_child(frag->frag, prev_frag)) {
385 rb_erase(&frag->node, &ci->i_fragtree);
1b1bc16d
YZ
386 if (frag->split_by > 0)
387 ci->i_fragtree_nsplits--;
a4b7431f
YZ
388 kfree(frag);
389 }
3e7fbe9c
YZ
390 frag = NULL;
391 }
392 if (!frag) {
393 frag = __get_or_create_frag(ci, id);
394 if (IS_ERR(frag))
395 continue;
396 }
1b1bc16d
YZ
397 if (frag->split_by == 0)
398 ci->i_fragtree_nsplits++;
399 frag->split_by = split_by;
3e7fbe9c 400 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
a4b7431f 401 prev_frag = frag;
3e7fbe9c
YZ
402 }
403 while (rb_node) {
404 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
405 rb_node = rb_next(rb_node);
a4b7431f
YZ
406 /* delete stale split/leaf node */
407 if (frag->split_by > 0 ||
408 !is_frag_child(frag->frag, prev_frag)) {
409 rb_erase(&frag->node, &ci->i_fragtree);
1b1bc16d
YZ
410 if (frag->split_by > 0)
411 ci->i_fragtree_nsplits--;
a4b7431f
YZ
412 kfree(frag);
413 }
3e7fbe9c
YZ
414 }
415out_unlock:
416 mutex_unlock(&ci->i_fragtree_mutex);
417 return 0;
418}
355da1eb
SW
419
420/*
421 * initialize a newly allocated inode.
422 */
423struct inode *ceph_alloc_inode(struct super_block *sb)
424{
425 struct ceph_inode_info *ci;
426 int i;
427
428 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
429 if (!ci)
430 return NULL;
431
432 dout("alloc_inode %p\n", &ci->vfs_inode);
433
be655596
SW
434 spin_lock_init(&ci->i_ceph_lock);
435
355da1eb 436 ci->i_version = 0;
31c542a1 437 ci->i_inline_version = 0;
355da1eb
SW
438 ci->i_time_warp_seq = 0;
439 ci->i_ceph_flags = 0;
fdd4e158
YZ
440 atomic64_set(&ci->i_ordered_count, 1);
441 atomic64_set(&ci->i_release_count, 1);
442 atomic64_set(&ci->i_complete_seq[0], 0);
443 atomic64_set(&ci->i_complete_seq[1], 0);
355da1eb
SW
444 ci->i_symlink = NULL;
445
6c0f3af7 446 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
30c156d9 447 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
6c0f3af7 448
355da1eb
SW
449 ci->i_fragtree = RB_ROOT;
450 mutex_init(&ci->i_fragtree_mutex);
451
452 ci->i_xattrs.blob = NULL;
453 ci->i_xattrs.prealloc_blob = NULL;
454 ci->i_xattrs.dirty = false;
455 ci->i_xattrs.index = RB_ROOT;
456 ci->i_xattrs.count = 0;
457 ci->i_xattrs.names_size = 0;
458 ci->i_xattrs.vals_size = 0;
459 ci->i_xattrs.version = 0;
460 ci->i_xattrs.index_version = 0;
461
462 ci->i_caps = RB_ROOT;
463 ci->i_auth_cap = NULL;
464 ci->i_dirty_caps = 0;
465 ci->i_flushing_caps = 0;
466 INIT_LIST_HEAD(&ci->i_dirty_item);
467 INIT_LIST_HEAD(&ci->i_flushing_item);
f66fd9f0 468 ci->i_prealloc_cap_flush = NULL;
e4500b5e 469 INIT_LIST_HEAD(&ci->i_cap_flush_list);
355da1eb
SW
470 init_waitqueue_head(&ci->i_cap_wq);
471 ci->i_hold_caps_min = 0;
472 ci->i_hold_caps_max = 0;
473 INIT_LIST_HEAD(&ci->i_cap_delay_list);
355da1eb
SW
474 INIT_LIST_HEAD(&ci->i_cap_snaps);
475 ci->i_head_snapc = NULL;
476 ci->i_snap_caps = 0;
477
774a6a11 478 for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
355da1eb
SW
479 ci->i_nr_by_mode[i] = 0;
480
b0d7c223 481 mutex_init(&ci->i_truncate_mutex);
355da1eb
SW
482 ci->i_truncate_seq = 0;
483 ci->i_truncate_size = 0;
484 ci->i_truncate_pending = 0;
485
486 ci->i_max_size = 0;
487 ci->i_reported_size = 0;
488 ci->i_wanted_max_size = 0;
489 ci->i_requested_max_size = 0;
490
491 ci->i_pin_ref = 0;
492 ci->i_rd_ref = 0;
493 ci->i_rdcache_ref = 0;
494 ci->i_wr_ref = 0;
d3d0720d 495 ci->i_wb_ref = 0;
355da1eb
SW
496 ci->i_wrbuffer_ref = 0;
497 ci->i_wrbuffer_ref_head = 0;
498 ci->i_shared_gen = 0;
499 ci->i_rdcache_gen = 0;
500 ci->i_rdcache_revoking = 0;
501
502 INIT_LIST_HEAD(&ci->i_unsafe_writes);
503 INIT_LIST_HEAD(&ci->i_unsafe_dirops);
68cd5b4b 504 INIT_LIST_HEAD(&ci->i_unsafe_iops);
355da1eb
SW
505 spin_lock_init(&ci->i_unsafe_lock);
506
507 ci->i_snap_realm = NULL;
508 INIT_LIST_HEAD(&ci->i_snap_realm_item);
509 INIT_LIST_HEAD(&ci->i_snap_flush_item);
510
3c6f6b79
SW
511 INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
512 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
355da1eb
SW
513
514 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
515
99ccbd22
MT
516 ceph_fscache_inode_init(ci);
517
355da1eb
SW
518 return &ci->vfs_inode;
519}
520
fa0d7e3d
NP
521static void ceph_i_callback(struct rcu_head *head)
522{
523 struct inode *inode = container_of(head, struct inode, i_rcu);
524 struct ceph_inode_info *ci = ceph_inode(inode);
525
fa0d7e3d
NP
526 kmem_cache_free(ceph_inode_cachep, ci);
527}
528
355da1eb
SW
529void ceph_destroy_inode(struct inode *inode)
530{
531 struct ceph_inode_info *ci = ceph_inode(inode);
532 struct ceph_inode_frag *frag;
533 struct rb_node *n;
534
535 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
536
99ccbd22
MT
537 ceph_fscache_unregister_inode_cookie(ci);
538
355da1eb
SW
539 ceph_queue_caps_release(inode);
540
	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

355da1eb
SW
557 kfree(ci->i_symlink);
558 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
559 frag = rb_entry(n, struct ceph_inode_frag, node);
560 rb_erase(n, &ci->i_fragtree);
561 kfree(frag);
562 }
1b1bc16d 563 ci->i_fragtree_nsplits = 0;
355da1eb
SW
564
565 __ceph_destroy_xattrs(ci);
b6c1d5b8
SW
566 if (ci->i_xattrs.blob)
567 ceph_buffer_put(ci->i_xattrs.blob);
568 if (ci->i_xattrs.prealloc_blob)
569 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
355da1eb 570
779fe0fb 571 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
30c156d9 572
fa0d7e3d 573 call_rcu(&inode->i_rcu, ceph_i_callback);
355da1eb
SW
574}
575
int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply. So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}
585
9a5530c6
YZ
586void ceph_evict_inode(struct inode *inode)
587{
588 /* wait unsafe sync writes */
589 ceph_sync_write_wait(inode);
590 truncate_inode_pages_final(&inode->i_data);
591 clear_inode(inode);
592}
593
224a7542
YZ
594static inline blkcnt_t calc_inode_blocks(u64 size)
595{
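	/* i_blocks is counted in 512-byte units; round the byte size up */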
596 return (size + (1<<9) - 1) >> 9;
597}
598
355da1eb
SW
599/*
600 * Helpers to fill in size, ctime, mtime, and atime. We have to be
601 * careful because either the client or MDS may have more up to date
602 * info, depending on which capabilities are held, and whether
603 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
604 * and size are monotonically increasing, except when utimes() or
605 * truncate() increments the corresponding _seq values.)
606 */
607int ceph_fill_file_size(struct inode *inode, int issued,
608 u32 truncate_seq, u64 truncate_size, u64 size)
609{
610 struct ceph_inode_info *ci = ceph_inode(inode);
611 int queue_trunc = 0;
612
613 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
614 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
615 dout("size %lld -> %llu\n", inode->i_size, size);
a3d714c3
YZ
616 if (size > 0 && S_ISDIR(inode->i_mode)) {
617 pr_err("fill_file_size non-zero size for directory\n");
618 size = 0;
619 }
99c88e69 620 i_size_write(inode, size);
224a7542 621 inode->i_blocks = calc_inode_blocks(size);
355da1eb
SW
622 ci->i_reported_size = size;
623 if (truncate_seq != ci->i_truncate_seq) {
624 dout("truncate_seq %u -> %u\n",
625 ci->i_truncate_seq, truncate_seq);
626 ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmapped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
648 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
649 ci->i_truncate_size != truncate_size) {
650 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
651 truncate_size);
652 ci->i_truncate_size = truncate_size;
653 }
99ccbd22
MT
654
655 if (queue_trunc)
656 ceph_fscache_invalidate(inode);
657
355da1eb
SW
658 return queue_trunc;
659}
660
661void ceph_fill_file_time(struct inode *inode, int issued,
662 u64 time_warp_seq, struct timespec *ctime,
663 struct timespec *mtime, struct timespec *atime)
664{
665 struct ceph_inode_info *ci = ceph_inode(inode);
666 int warn = 0;
667
668 if (issued & (CEPH_CAP_FILE_EXCL|
669 CEPH_CAP_FILE_WR|
d8672d64
SW
670 CEPH_CAP_FILE_BUFFER|
671 CEPH_CAP_AUTH_EXCL|
672 CEPH_CAP_XATTR_EXCL)) {
355da1eb
SW
673 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
674 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
675 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
676 ctime->tv_sec, ctime->tv_nsec);
677 inode->i_ctime = *ctime;
678 }
679 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
680 /* the MDS did a utimes() */
681 dout("mtime %ld.%09ld -> %ld.%09ld "
682 "tw %d -> %d\n",
683 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
684 mtime->tv_sec, mtime->tv_nsec,
685 ci->i_time_warp_seq, (int)time_warp_seq);
686
687 inode->i_mtime = *mtime;
688 inode->i_atime = *atime;
689 ci->i_time_warp_seq = time_warp_seq;
690 } else if (time_warp_seq == ci->i_time_warp_seq) {
691 /* nobody did utimes(); take the max */
692 if (timespec_compare(mtime, &inode->i_mtime) > 0) {
693 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
694 inode->i_mtime.tv_sec,
695 inode->i_mtime.tv_nsec,
696 mtime->tv_sec, mtime->tv_nsec);
697 inode->i_mtime = *mtime;
698 }
699 if (timespec_compare(atime, &inode->i_atime) > 0) {
700 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
701 inode->i_atime.tv_sec,
702 inode->i_atime.tv_nsec,
703 atime->tv_sec, atime->tv_nsec);
704 inode->i_atime = *atime;
705 }
706 } else if (issued & CEPH_CAP_FILE_EXCL) {
707 /* we did a utimes(); ignore mds values */
708 } else {
709 warn = 1;
710 }
711 } else {
d8672d64 712 /* we have no write|excl caps; whatever the MDS says is true */
355da1eb
SW
713 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
714 inode->i_ctime = *ctime;
715 inode->i_mtime = *mtime;
716 inode->i_atime = *atime;
717 ci->i_time_warp_seq = time_warp_seq;
718 } else {
719 warn = 1;
720 }
721 }
722 if (warn) /* time_warp_seq shouldn't go backwards */
723 dout("%p mds time_warp_seq %llu < %u\n",
724 inode, time_warp_seq, ci->i_time_warp_seq);
725}
726
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

752 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
753 inode, ceph_vinop(inode), le64_to_cpu(info->version),
754 ci->i_version);
755
d9df2783
YZ
756 /* prealloc new cap struct */
757 if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
758 new_cap = ceph_get_cap(mdsc, caps_reservation);
759
355da1eb
SW
760 /*
761 * prealloc xattr data, if it looks like we'll need it. only
762 * if len > 4 (meaning there are actually xattrs; the first 4
763 * bytes are the xattr count).
764 */
765 if (iinfo->xattr_len > 4) {
b6c1d5b8 766 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
355da1eb
SW
767 if (!xattr_blob)
768 pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
769 iinfo->xattr_len);
770 }
771
779fe0fb
YZ
772 if (iinfo->pool_ns_len > 0)
773 pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
774 iinfo->pool_ns_len);
775
be655596 776 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
777
	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *    2    2     skip
	 *    3    2     skip
	 *    3    3     update
	 */
f98a128a
YZ
789 if (ci->i_version == 0 ||
790 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
791 le64_to_cpu(info->version) > (ci->i_version & ~1)))
792 new_version = true;
793
355da1eb
SW
794 issued = __ceph_caps_issued(ci, &implemented);
795 issued |= implemented | __ceph_caps_dirty(ci);
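	/* new_issued: cap bits the MDS now grants that we were not already
	 * tracking locally */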
f98a128a 796 new_issued = ~issued & le32_to_cpu(info->cap.caps);
355da1eb
SW
797
798 /* update inode */
799 ci->i_version = le64_to_cpu(info->version);
800 inode->i_version++;
801 inode->i_rdev = le32_to_cpu(info->rdev);
f98a128a 802 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
355da1eb 803
f98a128a
YZ
804 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
805 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
355da1eb 806 inode->i_mode = le32_to_cpu(info->mode);
ab871b90
EB
807 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
808 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
355da1eb 809 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
bd2bae6a
EB
810 from_kuid(&init_user_ns, inode->i_uid),
811 from_kgid(&init_user_ns, inode->i_gid));
355da1eb
SW
812 }
813
f98a128a
YZ
814 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
815 (issued & CEPH_CAP_LINK_EXCL) == 0)
bfe86848 816 set_nlink(inode, le32_to_cpu(info->nlink));
355da1eb 817
f98a128a
YZ
818 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
819 /* be careful with mtime, atime, size */
820 ceph_decode_timespec(&atime, &info->atime);
821 ceph_decode_timespec(&mtime, &info->mtime);
822 ceph_decode_timespec(&ctime, &info->ctime);
823 ceph_fill_file_time(inode, issued,
824 le32_to_cpu(info->time_warp_seq),
825 &ctime, &mtime, &atime);
826 }
827
828 if (new_version ||
829 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
7627151e 830 s64 old_pool = ci->i_layout.pool_id;
779fe0fb
YZ
831 struct ceph_string *old_ns;
832
7627151e 833 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
779fe0fb
YZ
834 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
835 lockdep_is_held(&ci->i_ceph_lock));
836 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
837
838 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
10183a69 839 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
10183a69 840
779fe0fb 841 pool_ns = old_ns;
10183a69 842
f98a128a
YZ
843 queue_trunc = ceph_fill_file_size(inode, issued,
844 le32_to_cpu(info->truncate_seq),
845 le64_to_cpu(info->truncate_size),
846 le64_to_cpu(info->size));
847 /* only update max_size on auth cap */
848 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
849 ci->i_max_size != le64_to_cpu(info->max_size)) {
850 dout("max_size %lld -> %llu\n", ci->i_max_size,
851 le64_to_cpu(info->max_size));
852 ci->i_max_size = le64_to_cpu(info->max_size);
853 }
854 }
355da1eb
SW
855
856 /* xattrs */
857 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
508b32d8 858 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
355da1eb
SW
859 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
860 if (ci->i_xattrs.blob)
861 ceph_buffer_put(ci->i_xattrs.blob);
862 ci->i_xattrs.blob = xattr_blob;
863 if (xattr_blob)
864 memcpy(ci->i_xattrs.blob->vec.iov_base,
865 iinfo->xattr_data, iinfo->xattr_len);
866 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
7221fe4c 867 ceph_forget_all_cached_acls(inode);
a6424e48 868 xattr_blob = NULL;
355da1eb
SW
869 }
870
871 inode->i_mapping->a_ops = &ceph_aops;
355da1eb
SW
872
873 switch (inode->i_mode & S_IFMT) {
874 case S_IFIFO:
875 case S_IFBLK:
876 case S_IFCHR:
877 case S_IFSOCK:
878 init_special_inode(inode, inode->i_mode, inode->i_rdev);
879 inode->i_op = &ceph_file_iops;
880 break;
881 case S_IFREG:
882 inode->i_op = &ceph_file_iops;
883 inode->i_fop = &ceph_file_fops;
884 break;
885 case S_IFLNK:
886 inode->i_op = &ceph_symlink_iops;
887 if (!ci->i_symlink) {
810339ec 888 u32 symlen = iinfo->symlink_len;
355da1eb
SW
889 char *sym;
890
be655596 891 spin_unlock(&ci->i_ceph_lock);
355da1eb 892
224a7542
YZ
893 if (symlen != i_size_read(inode)) {
894 pr_err("fill_inode %llx.%llx BAD symlink "
895 "size %lld\n", ceph_vinop(inode),
896 i_size_read(inode));
897 i_size_write(inode, symlen);
898 inode->i_blocks = calc_inode_blocks(symlen);
899 }
810339ec 900
355da1eb 901 err = -ENOMEM;
810339ec 902 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
355da1eb
SW
903 if (!sym)
904 goto out;
355da1eb 905
be655596 906 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
907 if (!ci->i_symlink)
908 ci->i_symlink = sym;
909 else
910 kfree(sym); /* lost a race */
911 }
ac194dcc 912 inode->i_link = ci->i_symlink;
355da1eb
SW
913 break;
914 case S_IFDIR:
915 inode->i_op = &ceph_dir_iops;
916 inode->i_fop = &ceph_dir_fops;
917
14303d20
SW
918 ci->i_dir_layout = iinfo->dir_layout;
919
355da1eb
SW
920 ci->i_files = le64_to_cpu(info->files);
921 ci->i_subdirs = le64_to_cpu(info->subdirs);
922 ci->i_rbytes = le64_to_cpu(info->rbytes);
923 ci->i_rfiles = le64_to_cpu(info->rfiles);
924 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
925 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
355da1eb
SW
926 break;
927 default:
928 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
929 ceph_vinop(inode), inode->i_mode);
930 }
931
355da1eb
SW
932 /* were we issued a capability? */
933 if (info->cap.caps) {
934 if (ceph_snap(inode) == CEPH_NOSNAP) {
2f92b3d0 935 unsigned caps = le32_to_cpu(info->cap.caps);
355da1eb
SW
936 ceph_add_cap(inode, session,
937 le64_to_cpu(info->cap.cap_id),
2f92b3d0 938 cap_fmode, caps,
355da1eb
SW
939 le32_to_cpu(info->cap.wanted),
940 le32_to_cpu(info->cap.seq),
941 le32_to_cpu(info->cap.mseq),
942 le64_to_cpu(info->cap.realm),
d9df2783 943 info->cap.flags, &new_cap);
2f92b3d0
YZ
944
945 /* set dir completion flag? */
946 if (S_ISDIR(inode->i_mode) &&
947 ci->i_files == 0 && ci->i_subdirs == 0 &&
948 (caps & CEPH_CAP_FILE_SHARED) &&
949 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
950 !__ceph_dir_is_complete(ci)) {
951 dout(" marking %p complete (empty)\n", inode);
fdd4e158 952 i_size_write(inode, 0);
2f92b3d0 953 __ceph_dir_set_complete(ci,
fdd4e158
YZ
954 atomic64_read(&ci->i_release_count),
955 atomic64_read(&ci->i_ordered_count));
2f92b3d0
YZ
956 }
957
d9df2783 958 wake = true;
355da1eb 959 } else {
355da1eb
SW
960 dout(" %p got snap_caps %s\n", inode,
961 ceph_cap_string(le32_to_cpu(info->cap.caps)));
962 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
963 if (cap_fmode >= 0)
964 __ceph_get_fmode(ci, cap_fmode);
355da1eb 965 }
04d000eb 966 } else if (cap_fmode >= 0) {
f3ae1b97 967 pr_warn("mds issued no caps on %llx.%llx\n",
04d000eb
SW
968 ceph_vinop(inode));
969 __ceph_get_fmode(ci, cap_fmode);
355da1eb 970 }
31c542a1
YZ
971
972 if (iinfo->inline_version > 0 &&
973 iinfo->inline_version >= ci->i_inline_version) {
974 int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
975 ci->i_inline_version = iinfo->inline_version;
976 if (ci->i_inline_version != CEPH_INLINE_NONE &&
01deead0
YZ
977 (locked_page ||
978 (le32_to_cpu(info->cap.caps) & cache_caps)))
31c542a1
YZ
979 fill_inline = true;
980 }
981
be655596 982 spin_unlock(&ci->i_ceph_lock);
355da1eb 983
31c542a1 984 if (fill_inline)
01deead0 985 ceph_fill_inline_data(inode, locked_page,
31c542a1
YZ
986 iinfo->inline_data, iinfo->inline_len);
987
d9df2783
YZ
988 if (wake)
989 wake_up_all(&ci->i_cap_wq);
990
355da1eb
SW
991 /* queue truncate if we saw i_size decrease */
992 if (queue_trunc)
3c6f6b79 993 ceph_queue_vmtruncate(inode);
355da1eb
SW
994
995 /* populate frag tree */
3e7fbe9c
YZ
996 if (S_ISDIR(inode->i_mode))
997 ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
355da1eb
SW
998
999 /* update delegation info? */
1000 if (dirinfo)
1001 ceph_fill_dirfrag(inode, dirinfo);
1002
1003 err = 0;
355da1eb 1004out:
d9df2783
YZ
1005 if (new_cap)
1006 ceph_put_cap(mdsc, new_cap);
b6c1d5b8
SW
1007 if (xattr_blob)
1008 ceph_buffer_put(xattr_blob);
779fe0fb 1009 ceph_put_string(pool_ns);
355da1eb
SW
1010 return err;
1011}
1012
1013/*
1014 * caller should hold session s_mutex.
1015 */
1016static void update_dentry_lease(struct dentry *dentry,
1017 struct ceph_mds_reply_lease *lease,
1018 struct ceph_mds_session *session,
1019 unsigned long from_time)
1020{
1021 struct ceph_dentry_info *di = ceph_dentry(dentry);
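	/* the lease duration arrives in milliseconds; convert to jiffies
	 * for the absolute and half-life expiry times below */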
1022 long unsigned duration = le32_to_cpu(lease->duration_ms);
1023 long unsigned ttl = from_time + (duration * HZ) / 1000;
1024 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1025 struct inode *dir;
1026
355da1eb 1027 spin_lock(&dentry->d_lock);
2f90b852
SW
1028 dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1029 dentry, duration, ttl);
355da1eb
SW
1030
1031 /* make lease_rdcache_gen match directory */
2b0143b5 1032 dir = d_inode(dentry->d_parent);
18fc8abd
AV
1033
1034 /* only track leases on regular dentries */
1035 if (ceph_snap(dir) != CEPH_NOSNAP)
1036 goto out_unlock;
1037
355da1eb
SW
1038 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
1039
2f90b852 1040 if (duration == 0)
355da1eb
SW
1041 goto out_unlock;
1042
1043 if (di->lease_gen == session->s_cap_gen &&
9b16f03c 1044 time_before(ttl, di->time))
355da1eb
SW
1045 goto out_unlock; /* we already have a newer lease. */
1046
1047 if (di->lease_session && di->lease_session != session)
1048 goto out_unlock;
1049
1050 ceph_dentry_lru_touch(dentry);
1051
1052 if (!di->lease_session)
1053 di->lease_session = ceph_get_mds_session(session);
1054 di->lease_gen = session->s_cap_gen;
1055 di->lease_seq = le32_to_cpu(lease->seq);
1056 di->lease_renew_after = half_ttl;
1057 di->lease_renew_from = 0;
9b16f03c 1058 di->time = ttl;
355da1eb
SW
1059out_unlock:
1060 spin_unlock(&dentry->d_lock);
1061 return;
1062}
1063
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
f7380af0 1068static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
355da1eb
SW
1069{
1070 struct dentry *realdn;
1071
2b0143b5 1072 BUG_ON(d_inode(dn));
1cd3935b 1073
355da1eb
SW
1074 /* dn must be unhashed */
1075 if (!d_unhashed(dn))
1076 d_drop(dn);
41d28bca 1077 realdn = d_splice_alias(in, dn);
355da1eb 1078 if (IS_ERR(realdn)) {
d69ed05a
SW
1079 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1080 PTR_ERR(realdn), dn, in, ceph_vinop(in));
355da1eb
SW
1081 dn = realdn; /* note realdn contains the error */
1082 goto out;
1083 } else if (realdn) {
1084 dout("dn %p (%d) spliced with %p (%d) "
1085 "inode %p ino %llx.%llx\n",
84d08fa8
AV
1086 dn, d_count(dn),
1087 realdn, d_count(realdn),
2b0143b5 1088 d_inode(realdn), ceph_vinop(d_inode(realdn)));
355da1eb
SW
1089 dput(dn);
1090 dn = realdn;
1091 } else {
1092 BUG_ON(!ceph_dentry(dn));
355da1eb 1093 dout("dn %p attached to %p ino %llx.%llx\n",
2b0143b5 1094 dn, d_inode(dn), ceph_vinop(d_inode(dn)));
355da1eb 1095 }
355da1eb
SW
1096out:
1097 return dn;
1098}
1099
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
1111int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1112 struct ceph_mds_session *session)
1113{
1114 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1115 struct inode *in = NULL;
355da1eb 1116 struct ceph_vino vino;
3d14c5d2 1117 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
355da1eb
SW
1118 int err = 0;
1119
1120 dout("fill_trace %p is_dentry %d is_target %d\n", req,
1121 rinfo->head->is_dentry, rinfo->head->is_target);
1122
1123#if 0
1124 /*
1125 * Debugging hook:
1126 *
1127 * If we resend completed ops to a recovering mds, we get no
1128 * trace. Since that is very rare, pretend this is the case
1129 * to ensure the 'no trace' handlers in the callers behave.
1130 *
1131 * Fill in inodes unconditionally to avoid breaking cap
1132 * invariants.
1133 */
1134 if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
1135 pr_info("fill_trace faking empty trace on %lld %s\n",
1136 req->r_tid, ceph_mds_op_name(rinfo->head->op));
1137 if (rinfo->head->is_dentry) {
1138 rinfo->head->is_dentry = 0;
1139 err = fill_inode(req->r_locked_dir,
1140 &rinfo->diri, rinfo->dirfrag,
1141 session, req->r_request_started, -1);
1142 }
1143 if (rinfo->head->is_target) {
1144 rinfo->head->is_target = 0;
1145 ininfo = rinfo->targeti.in;
1146 vino.ino = le64_to_cpu(ininfo->ino);
1147 vino.snap = le64_to_cpu(ininfo->snapid);
1148 in = ceph_get_inode(sb, vino);
1149 err = fill_inode(in, &rinfo->targeti, NULL,
1150 session, req->r_request_started,
1151 req->r_fmode);
1152 iput(in);
1153 }
1154 }
1155#endif
1156
1157 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1158 dout("fill_trace reply is empty!\n");
167c9e35
SW
1159 if (rinfo->head->result == 0 && req->r_locked_dir)
1160 ceph_invalidate_dir_request(req);
355da1eb
SW
1161 return 0;
1162 }
1163
1164 if (rinfo->head->is_dentry) {
5b1daecd
SW
1165 struct inode *dir = req->r_locked_dir;
1166
6c5e50fa 1167 if (dir) {
01deead0
YZ
1168 err = fill_inode(dir, NULL,
1169 &rinfo->diri, rinfo->dirfrag,
6c5e50fa
SW
1170 session, req->r_request_started, -1,
1171 &req->r_caps_reservation);
1172 if (err < 0)
19913b4e 1173 goto done;
6c5e50fa
SW
1174 } else {
1175 WARN_ON_ONCE(1);
1176 }
19913b4e
YZ
1177
1178 if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
1179 struct qstr dname;
1180 struct dentry *dn, *parent;
1181
1182 BUG_ON(!rinfo->head->is_target);
1183 BUG_ON(req->r_dentry);
1184
1185 parent = d_find_any_alias(dir);
1186 BUG_ON(!parent);
1187
1188 dname.name = rinfo->dname;
1189 dname.len = rinfo->dname_len;
8387ff25 1190 dname.hash = full_name_hash(parent, dname.name, dname.len);
19913b4e
YZ
1191 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1192 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1193retry_lookup:
1194 dn = d_lookup(parent, &dname);
1195 dout("d_lookup on parent=%p name=%.*s got %p\n",
1196 parent, dname.len, dname.name, dn);
1197
1198 if (!dn) {
1199 dn = d_alloc(parent, &dname);
1200 dout("d_alloc %p '%.*s' = %p\n", parent,
1201 dname.len, dname.name, dn);
1202 if (dn == NULL) {
1203 dput(parent);
1204 err = -ENOMEM;
1205 goto done;
1206 }
ad5cb123 1207 err = 0;
2b0143b5
DH
1208 } else if (d_really_is_positive(dn) &&
1209 (ceph_ino(d_inode(dn)) != vino.ino ||
1210 ceph_snap(d_inode(dn)) != vino.snap)) {
19913b4e 1211 dout(" dn %p points to wrong inode %p\n",
2b0143b5 1212 dn, d_inode(dn));
19913b4e
YZ
1213 d_delete(dn);
1214 dput(dn);
1215 goto retry_lookup;
1216 }
1217
1218 req->r_dentry = dn;
1219 dput(parent);
1220 }
5b1daecd
SW
1221 }
1222
86b58d13
YZ
1223 if (rinfo->head->is_target) {
1224 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1225 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1226
1227 in = ceph_get_inode(sb, vino);
1228 if (IS_ERR(in)) {
1229 err = PTR_ERR(in);
1230 goto done;
1231 }
1232 req->r_target_inode = in;
1233
01deead0 1234 err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
86b58d13 1235 session, req->r_request_started,
48193012 1236 (!req->r_aborted && rinfo->head->result == 0) ?
86b58d13
YZ
1237 req->r_fmode : -1,
1238 &req->r_caps_reservation);
1239 if (err < 0) {
1240 pr_err("fill_inode badness %p %llx.%llx\n",
1241 in, ceph_vinop(in));
1242 goto done;
1243 }
1244 }
1245
	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
1250 if (rinfo->head->is_dentry && !req->r_aborted &&
6c5e50fa 1251 req->r_locked_dir &&
9358c6d4 1252 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
3d14c5d2 1253 fsc->mount_options->snapdir_name,
9358c6d4 1254 req->r_dentry->d_name.len))) {
355da1eb
SW
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
1260 struct inode *dir = req->r_locked_dir;
1261 struct dentry *dn = req->r_dentry;
1262 bool have_dir_cap, have_lease;
1263
1264 BUG_ON(!dn);
1265 BUG_ON(!dir);
2b0143b5 1266 BUG_ON(d_inode(dn->d_parent) != dir);
355da1eb
SW
1267 BUG_ON(ceph_ino(dir) !=
1268 le64_to_cpu(rinfo->diri.in->ino));
1269 BUG_ON(ceph_snap(dir) !=
1270 le64_to_cpu(rinfo->diri.in->snapid));
1271
355da1eb
SW
1272 /* do we have a lease on the whole dir? */
1273 have_dir_cap =
1274 (le32_to_cpu(rinfo->diri.in->cap.caps) &
1275 CEPH_CAP_FILE_SHARED);
1276
1277 /* do we have a dn lease? */
1278 have_lease = have_dir_cap ||
2f90b852 1279 le32_to_cpu(rinfo->dlease->duration_ms);
355da1eb
SW
1280 if (!have_lease)
1281 dout("fill_trace no dentry lease or dir cap\n");
1282
1283 /* rename? */
1284 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
0a8a70f9
YZ
1285 struct inode *olddir = req->r_old_dentry_dir;
1286 BUG_ON(!olddir);
1287
a455589f 1288 dout(" src %p '%pd' dst %p '%pd'\n",
355da1eb 1289 req->r_old_dentry,
a455589f
AV
1290 req->r_old_dentry,
1291 dn, dn);
355da1eb
SW
1292 dout("fill_trace doing d_move %p -> %p\n",
1293 req->r_old_dentry, dn);
c10f5e12 1294
fdd4e158
YZ
1295 /* d_move screws up sibling dentries' offsets */
1296 ceph_dir_clear_ordered(dir);
1297 ceph_dir_clear_ordered(olddir);
1298
355da1eb 1299 d_move(req->r_old_dentry, dn);
a455589f
AV
1300 dout(" src %p '%pd' dst %p '%pd'\n",
1301 req->r_old_dentry,
355da1eb 1302 req->r_old_dentry,
a455589f 1303 dn, dn);
81a6cf2d 1304
c4a29f26
SW
1305 /* ensure target dentry is invalidated, despite
1306 rehashing bug in vfs_rename_dir */
81a6cf2d
SW
1307 ceph_invalidate_dentry_lease(dn);
1308
99ccbd22 1309 dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1cd3935b 1310 ceph_dentry(req->r_old_dentry)->offset);
81a6cf2d 1311
355da1eb 1312 dn = req->r_old_dentry; /* use old_dentry */
355da1eb
SW
1313 }
1314
1315 /* null dentry? */
1316 if (!rinfo->head->is_target) {
1317 dout("fill_trace null dentry\n");
2b0143b5 1318 if (d_really_is_positive(dn)) {
70db4f36 1319 ceph_dir_clear_ordered(dir);
355da1eb
SW
1320 dout("d_delete %p\n", dn);
1321 d_delete(dn);
1322 } else {
355da1eb 1323 if (have_lease && d_unhashed(dn))
f8b31710 1324 d_add(dn, NULL);
355da1eb
SW
1325 update_dentry_lease(dn, rinfo->dlease,
1326 session,
1327 req->r_request_started);
1328 }
1329 goto done;
1330 }
1331
1332 /* attach proper inode */
2b0143b5 1333 if (d_really_is_negative(dn)) {
70db4f36 1334 ceph_dir_clear_ordered(dir);
86b58d13 1335 ihold(in);
f7380af0 1336 dn = splice_dentry(dn, in);
355da1eb
SW
1337 if (IS_ERR(dn)) {
1338 err = PTR_ERR(dn);
1339 goto done;
1340 }
1341 req->r_dentry = dn; /* may have spliced */
2b0143b5 1342 } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
355da1eb 1343 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
2b0143b5 1344 dn, d_inode(dn), ceph_vinop(d_inode(dn)),
86b58d13 1345 ceph_vinop(in));
200fd27c 1346 d_invalidate(dn);
355da1eb 1347 have_lease = false;
355da1eb
SW
1348 }
1349
1350 if (have_lease)
1351 update_dentry_lease(dn, rinfo->dlease, session,
1352 req->r_request_started);
1353 dout(" final dn %p\n", dn);
86b58d13
YZ
1354 } else if (!req->r_aborted &&
1355 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1356 req->r_op == CEPH_MDS_OP_MKSNAP)) {
355da1eb 1357 struct dentry *dn = req->r_dentry;
0a8a70f9 1358 struct inode *dir = req->r_locked_dir;
355da1eb
SW
1359
1360 /* fill out a snapdir LOOKUPSNAP dentry */
1361 BUG_ON(!dn);
0a8a70f9
YZ
1362 BUG_ON(!dir);
1363 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
355da1eb 1364 dout(" linking snapped dir %p to dn %p\n", in, dn);
70db4f36 1365 ceph_dir_clear_ordered(dir);
86b58d13 1366 ihold(in);
f7380af0 1367 dn = splice_dentry(dn, in);
355da1eb
SW
1368 if (IS_ERR(dn)) {
1369 err = PTR_ERR(dn);
1370 goto done;
1371 }
1372 req->r_dentry = dn; /* may have spliced */
355da1eb 1373 }
355da1eb
SW
1374done:
1375 dout("fill_trace done err=%d\n", err);
1376 return err;
1377}
1378
1379/*
1380 * Prepopulate our cache with readdir results, leases, etc.
1381 */
79f9f99a
SW
1382static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1383 struct ceph_mds_session *session)
1384{
1385 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1386 int i, err = 0;
1387
1388 for (i = 0; i < rinfo->dir_nr; i++) {
2a5beea3 1389 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
79f9f99a
SW
1390 struct ceph_vino vino;
1391 struct inode *in;
1392 int rc;
1393
2a5beea3
YZ
1394 vino.ino = le64_to_cpu(rde->inode.in->ino);
1395 vino.snap = le64_to_cpu(rde->inode.in->snapid);
79f9f99a
SW
1396
1397 in = ceph_get_inode(req->r_dentry->d_sb, vino);
1398 if (IS_ERR(in)) {
1399 err = PTR_ERR(in);
1400 dout("new_inode badness got %d\n", err);
1401 continue;
1402 }
2a5beea3 1403 rc = fill_inode(in, NULL, &rde->inode, NULL, session,
79f9f99a
SW
1404 req->r_request_started, -1,
1405 &req->r_caps_reservation);
1406 if (rc < 0) {
1407 pr_err("fill_inode badness on %p got %d\n", in, rc);
1408 err = rc;
79f9f99a 1409 }
209ae762 1410 iput(in);
79f9f99a
SW
1411 }
1412
1413 return err;
1414}
1415
fdd4e158
YZ
1416void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1417{
1418 if (ctl->page) {
1419 kunmap(ctl->page);
09cbfeaf 1420 put_page(ctl->page);
fdd4e158
YZ
1421 ctl->page = NULL;
1422 }
1423}
1424
1425static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1426 struct ceph_readdir_cache_control *ctl,
1427 struct ceph_mds_request *req)
1428{
1429 struct ceph_inode_info *ci = ceph_inode(dir);
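	/* readdir results are cached as an array of dentry pointers kept in
	 * the directory's own page cache; nsize pointers fit in each page */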
09cbfeaf 1430 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
fdd4e158
YZ
1431 unsigned idx = ctl->index % nsize;
1432 pgoff_t pgoff = ctl->index / nsize;
1433
1434 if (!ctl->page || pgoff != page_index(ctl->page)) {
1435 ceph_readdir_cache_release(ctl);
af5e5eb5
YZ
1436 if (idx == 0)
1437 ctl->page = grab_cache_page(&dir->i_data, pgoff);
1438 else
1439 ctl->page = find_lock_page(&dir->i_data, pgoff);
fdd4e158
YZ
1440 if (!ctl->page) {
1441 ctl->index = -1;
af5e5eb5 1442 return idx == 0 ? -ENOMEM : 0;
fdd4e158
YZ
1443 }
1444 /* reading/filling the cache are serialized by
1445 * i_mutex, no need to use page lock */
1446 unlock_page(ctl->page);
1447 ctl->dentries = kmap(ctl->page);
af5e5eb5 1448 if (idx == 0)
09cbfeaf 1449 memset(ctl->dentries, 0, PAGE_SIZE);
fdd4e158
YZ
1450 }
1451
1452 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1453 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1454 dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1455 ctl->dentries[idx] = dn;
1456 ctl->index++;
1457 } else {
1458 dout("disable readdir cache\n");
1459 ctl->index = -1;
1460 }
1461 return 0;
1462}
1463
355da1eb
SW
1464int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1465 struct ceph_mds_session *session)
1466{
1467 struct dentry *parent = req->r_dentry;
f3c4ebe6 1468 struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
355da1eb
SW
1469 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1470 struct qstr dname;
1471 struct dentry *dn;
1472 struct inode *in;
315f2408 1473 int err = 0, skipped = 0, ret, i;
355da1eb
SW
1474 struct inode *snapdir = NULL;
1475 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
81c6aea5 1476 u32 frag = le32_to_cpu(rhead->args.readdir.frag);
f3c4ebe6
YZ
1477 u32 last_hash = 0;
1478 u32 fpos_offset;
fdd4e158
YZ
1479 struct ceph_readdir_cache_control cache_ctl = {};
1480
1481 if (req->r_aborted)
1482 return readdir_prepopulate_inodes_only(req, session);
81c6aea5 1483
f3c4ebe6
YZ
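	/* when entries come back in hash order, derive the starting hash
	 * from req->r_path2 so dentry offsets continue from that point */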
1484 if (rinfo->hash_order && req->r_path2) {
1485 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1486 req->r_path2, strlen(req->r_path2));
1487 last_hash = ceph_frag_value(last_hash);
1488 }
1489
81c6aea5
YZ
1490 if (rinfo->dir_dir &&
1491 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1492 dout("readdir_prepopulate got new frag %x -> %x\n",
1493 frag, le32_to_cpu(rinfo->dir_dir->frag));
1494 frag = le32_to_cpu(rinfo->dir_dir->frag);
f3c4ebe6 1495 if (!rinfo->hash_order)
fdd4e158 1496 req->r_readdir_offset = 2;
81c6aea5 1497 }
355da1eb
SW
1498
1499 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
2b0143b5 1500 snapdir = ceph_get_snapdir(d_inode(parent));
355da1eb
SW
1501 parent = d_find_alias(snapdir);
1502 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1503 rinfo->dir_nr, parent);
1504 } else {
1505 dout("readdir_prepopulate %d items under dn %p\n",
1506 rinfo->dir_nr, parent);
1507 if (rinfo->dir_dir)
2b0143b5 1508 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
355da1eb
SW
1509 }
1510
f72f9455
YZ
1511 if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
1512 !(rinfo->hash_order && req->r_path2)) {
fdd4e158
YZ
1513 /* note dir version at start of readdir so we can tell
1514 * if any dentries get dropped */
fdd4e158
YZ
1515 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
1516 req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
1517 req->r_readdir_cache_idx = 0;
1518 }
1519
1520 cache_ctl.index = req->r_readdir_cache_idx;
f3c4ebe6 1521 fpos_offset = req->r_readdir_offset;
fdd4e158 1522
86b58d13 1523 /* FIXME: release caps/leases if error occurs */
355da1eb 1524 for (i = 0; i < rinfo->dir_nr; i++) {
2a5beea3 1525 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
355da1eb
SW
1526 struct ceph_vino vino;
1527
2a5beea3
YZ
1528 dname.name = rde->name;
1529 dname.len = rde->name_len;
8387ff25 1530 dname.hash = full_name_hash(parent, dname.name, dname.len);
355da1eb 1531
2a5beea3
YZ
1532 vino.ino = le64_to_cpu(rde->inode.in->ino);
1533 vino.snap = le64_to_cpu(rde->inode.in->snapid);
355da1eb 1534
f3c4ebe6
YZ
1535 if (rinfo->hash_order) {
1536 u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1537 rde->name, rde->name_len);
1538 hash = ceph_frag_value(hash);
1539 if (hash != last_hash)
1540 fpos_offset = 2;
1541 last_hash = hash;
1542 rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1543 } else {
1544 rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1545 }
355da1eb
SW
1546
1547retry_lookup:
1548 dn = d_lookup(parent, &dname);
1549 dout("d_lookup on parent=%p name=%.*s got %p\n",
1550 parent, dname.len, dname.name, dn);
1551
1552 if (!dn) {
1553 dn = d_alloc(parent, &dname);
1554 dout("d_alloc %p '%.*s' = %p\n", parent,
1555 dname.len, dname.name, dn);
1556 if (dn == NULL) {
1557 dout("d_alloc badness\n");
1558 err = -ENOMEM;
1559 goto out;
1560 }
2b0143b5
DH
1561 } else if (d_really_is_positive(dn) &&
1562 (ceph_ino(d_inode(dn)) != vino.ino ||
1563 ceph_snap(d_inode(dn)) != vino.snap)) {
355da1eb 1564 dout(" dn %p points to wrong inode %p\n",
2b0143b5 1565 dn, d_inode(dn));
355da1eb
SW
1566 d_delete(dn);
1567 dput(dn);
1568 goto retry_lookup;
355da1eb
SW
1569 }
1570
355da1eb 1571 /* inode */
2b0143b5
DH
1572 if (d_really_is_positive(dn)) {
1573 in = d_inode(dn);
355da1eb
SW
1574 } else {
1575 in = ceph_get_inode(parent->d_sb, vino);
ac1f12ef 1576 if (IS_ERR(in)) {
355da1eb 1577 dout("new_inode badness\n");
2744c171 1578 d_drop(dn);
355da1eb 1579 dput(dn);
ac1f12ef 1580 err = PTR_ERR(in);
355da1eb
SW
1581 goto out;
1582 }
355da1eb
SW
1583 }
1584
2a5beea3 1585 ret = fill_inode(in, NULL, &rde->inode, NULL, session,
fdd4e158
YZ
1586 req->r_request_started, -1,
1587 &req->r_caps_reservation);
1588 if (ret < 0) {
355da1eb 1589 pr_err("fill_inode badness on %p\n", in);
2b0143b5 1590 if (d_really_is_negative(dn))
86b58d13
YZ
1591 iput(in);
1592 d_drop(dn);
fdd4e158 1593 err = ret;
d69ed05a 1594 goto next_item;
355da1eb 1595 }
86b58d13 1596
2b0143b5 1597 if (d_really_is_negative(dn)) {
315f2408
YZ
1598 struct dentry *realdn;
1599
1600 if (ceph_security_xattr_deadlock(in)) {
1601 dout(" skip splicing dn %p to inode %p"
1602 " (security xattr deadlock)\n", dn, in);
1603 iput(in);
1604 skipped++;
1605 goto next_item;
1606 }
1607
1608 realdn = splice_dentry(dn, in);
5cba372c
YZ
1609 if (IS_ERR(realdn)) {
1610 err = PTR_ERR(realdn);
1611 d_drop(dn);
86b58d13
YZ
1612 dn = NULL;
1613 goto next_item;
1614 }
5cba372c 1615 dn = realdn;
86b58d13
YZ
1616 }
1617
f3c4ebe6 1618 ceph_dentry(dn)->offset = rde->offset;
86b58d13 1619
2a5beea3 1620 update_dentry_lease(dn, rde->lease, req->r_session,
86b58d13 1621 req->r_request_started);
fdd4e158 1622
315f2408 1623 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
fdd4e158
YZ
1624 ret = fill_readdir_cache(d_inode(parent), dn,
1625 &cache_ctl, req);
1626 if (ret < 0)
1627 err = ret;
1628 }
d69ed05a
SW
1629next_item:
1630 if (dn)
1631 dput(dn);
355da1eb 1632 }
355da1eb 1633out:
315f2408 1634 if (err == 0 && skipped == 0) {
fdd4e158
YZ
1635 req->r_did_prepopulate = true;
1636 req->r_readdir_cache_idx = cache_ctl.index;
1637 }
1638 ceph_readdir_cache_release(&cache_ctl);
355da1eb
SW
1639 if (snapdir) {
1640 iput(snapdir);
1641 dput(parent);
1642 }
1643 dout("readdir_prepopulate done\n");
1644 return err;
1645}
1646
1647int ceph_inode_set_size(struct inode *inode, loff_t size)
1648{
1649 struct ceph_inode_info *ci = ceph_inode(inode);
1650 int ret = 0;
1651
be655596 1652 spin_lock(&ci->i_ceph_lock);
355da1eb 1653 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
99c88e69 1654 i_size_write(inode, size);
224a7542 1655 inode->i_blocks = calc_inode_blocks(size);
355da1eb
SW
1656
1657 /* tell the MDS if we are approaching max_size */
1658 if ((size << 1) >= ci->i_max_size &&
1659 (ci->i_reported_size << 1) < ci->i_max_size)
1660 ret = 1;
1661
be655596 1662 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1663 return ret;
1664}
1665
1666/*
1667 * Write back inode data in a worker thread. (This can't be done
1668 * in the message handler context.)
1669 */
3c6f6b79
SW
1670void ceph_queue_writeback(struct inode *inode)
1671{
15a2015f 1672 ihold(inode);
3c6f6b79
SW
1673 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1674 &ceph_inode(inode)->i_wb_work)) {
2c27c9a5 1675 dout("ceph_queue_writeback %p\n", inode);
3c6f6b79 1676 } else {
2c27c9a5 1677 dout("ceph_queue_writeback %p failed\n", inode);
15a2015f 1678 iput(inode);
3c6f6b79
SW
1679 }
1680}
1681
1682static void ceph_writeback_work(struct work_struct *work)
355da1eb
SW
1683{
1684 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1685 i_wb_work);
1686 struct inode *inode = &ci->vfs_inode;
1687
1688 dout("writeback %p\n", inode);
1689 filemap_fdatawrite(&inode->i_data);
1690 iput(inode);
1691}
1692
3c6f6b79
SW
1693/*
1694 * queue an async invalidation
1695 */
1696void ceph_queue_invalidate(struct inode *inode)
1697{
15a2015f 1698 ihold(inode);
3c6f6b79
SW
1699 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1700 &ceph_inode(inode)->i_pg_inv_work)) {
1701 dout("ceph_queue_invalidate %p\n", inode);
3c6f6b79
SW
1702 } else {
1703 dout("ceph_queue_invalidate %p failed\n", inode);
15a2015f 1704 iput(inode);
3c6f6b79
SW
1705 }
1706}
1707
355da1eb
SW
1708/*
1709 * Invalidate inode pages in a worker thread. (This can't be done
1710 * in the message handler context.)
1711 */
3c6f6b79 1712static void ceph_invalidate_work(struct work_struct *work)
355da1eb
SW
1713{
1714 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1715 i_pg_inv_work);
1716 struct inode *inode = &ci->vfs_inode;
6c93df5d 1717 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
355da1eb
SW
1718 u32 orig_gen;
1719 int check = 0;
1720
b0d7c223 1721 mutex_lock(&ci->i_truncate_mutex);
6c93df5d
YZ
1722
1723 if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1724 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
1725 inode, ceph_ino(inode));
1726 mapping_set_error(inode->i_mapping, -EIO);
1727 truncate_pagecache(inode, 0);
1728 mutex_unlock(&ci->i_truncate_mutex);
1729 goto out;
1730 }
1731
be655596 1732 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1733 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1734 ci->i_rdcache_gen, ci->i_rdcache_revoking);
cd045cb4 1735 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
9563f88c
YZ
1736 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1737 check = 1;
be655596 1738 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1739 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1740 goto out;
1741 }
1742 orig_gen = ci->i_rdcache_gen;
be655596 1743 spin_unlock(&ci->i_ceph_lock);
355da1eb 1744
9abd4db7
YZ
1745 if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1746 pr_err("invalidate_pages %p fails\n", inode);
1747 }
355da1eb 1748
be655596 1749 spin_lock(&ci->i_ceph_lock);
cd045cb4
SW
1750 if (orig_gen == ci->i_rdcache_gen &&
1751 orig_gen == ci->i_rdcache_revoking) {
355da1eb
SW
1752 dout("invalidate_pages %p gen %d successful\n", inode,
1753 ci->i_rdcache_gen);
cd045cb4 1754 ci->i_rdcache_revoking--;
355da1eb
SW
1755 check = 1;
1756 } else {
cd045cb4
SW
1757 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1758 inode, orig_gen, ci->i_rdcache_gen,
1759 ci->i_rdcache_revoking);
9563f88c
YZ
1760 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1761 check = 1;
355da1eb 1762 }
be655596 1763 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1764 mutex_unlock(&ci->i_truncate_mutex);
9563f88c 1765out:
355da1eb
SW
1766 if (check)
1767 ceph_check_caps(ci, 0, NULL);
355da1eb
SW
1768 iput(inode);
1769}
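/*
 * Illustrative sketch (not kernel code): the worker above samples
 * i_rdcache_gen under the lock, drops the lock to invalidate the page
 * cache, then re-checks the generation.  If another FILE_CACHE grant
 * bumped the generation in the meantime, this invalidation is stale and
 * i_rdcache_revoking is left alone.  The struct and helper names below
 * are hypothetical.
 */
#include <stdio.h>

struct rdcache_state {
        unsigned gen;           /* bumped whenever FILE_CACHE is re-granted */
        unsigned revoking;      /* generation the MDS asked us to drop */
};

/* returns 1 if the invalidation "stuck", 0 if it raced with a new grant */
static int invalidate(struct rdcache_state *s, int race)
{
        unsigned orig_gen = s->gen;     /* sampled under the lock */

        /* ... lock dropped, page cache invalidated here ... */
        if (race) {
                /* concurrent re-grant of FILE_CACHE, then a fresh revoke */
                s->gen++;
                s->revoking = s->gen;
        }

        /* lock re-taken: only count the revoke if nothing changed */
        if (orig_gen == s->gen && orig_gen == s->revoking) {
                s->revoking--;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct rdcache_state s = { .gen = 3, .revoking = 3 };

        printf("raced: %d\n", invalidate(&s, 1));       /* 0: stale */
        printf("clean: %d\n", invalidate(&s, 0));       /* 1: counted */
        return 0;
}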
1770
1771
1772/*
3f99969f 1773 * called by trunc_wq;
355da1eb
SW
1774 *
1775 * We also perform the truncation from a separate worker thread.
1776 */
3c6f6b79 1777static void ceph_vmtruncate_work(struct work_struct *work)
355da1eb
SW
1778{
1779 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1780 i_vmtruncate_work);
1781 struct inode *inode = &ci->vfs_inode;
1782
1783 dout("vmtruncate_work %p\n", inode);
b415bf4f 1784 __ceph_do_pending_vmtruncate(inode);
355da1eb
SW
1785 iput(inode);
1786}
1787
3c6f6b79
SW
1788/*
1789 * Queue an async vmtruncate. If we fail to queue work, we will handle
1790 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1791 */
1792void ceph_queue_vmtruncate(struct inode *inode)
1793{
1794 struct ceph_inode_info *ci = ceph_inode(inode);
1795
15a2015f 1796 ihold(inode);
99ccbd22 1797
640ef79d 1798 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
3c6f6b79
SW
1799 &ci->i_vmtruncate_work)) {
1800 dout("ceph_queue_vmtruncate %p\n", inode);
3c6f6b79
SW
1801 } else {
1802 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1803 inode, ci->i_truncate_pending);
15a2015f 1804 iput(inode);
3c6f6b79
SW
1805 }
1806}
1807
355da1eb 1808/*
355da1eb
SW
1809 * Make sure any pending truncation is applied before doing anything
1810 * that may depend on it.
1811 */
b415bf4f 1812void __ceph_do_pending_vmtruncate(struct inode *inode)
355da1eb
SW
1813{
1814 struct ceph_inode_info *ci = ceph_inode(inode);
1815 u64 to;
a85f50b6 1816 int wrbuffer_refs, finish = 0;
355da1eb 1817
b0d7c223 1818 mutex_lock(&ci->i_truncate_mutex);
355da1eb 1819retry:
be655596 1820 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1821 if (ci->i_truncate_pending == 0) {
1822 dout("__do_pending_vmtruncate %p none pending\n", inode);
be655596 1823 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1824 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1825 return;
1826 }
1827
1828 /*
1829 * make sure any dirty snapped pages are flushed before we
1830 * possibly truncate them, so write AND block!
1831 */
1832 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1833 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1834 inode);
be655596 1835 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1836 filemap_write_and_wait_range(&inode->i_data, 0,
1837 inode->i_sb->s_maxbytes);
1838 goto retry;
1839 }
1840
b0d7c223
YZ
1841 /* there should be no reader or writer */
1842 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1843
355da1eb
SW
1844 to = ci->i_truncate_size;
1845 wrbuffer_refs = ci->i_wrbuffer_ref;
1846 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1847 ci->i_truncate_pending, to);
be655596 1848 spin_unlock(&ci->i_ceph_lock);
355da1eb 1849
4e217b5d 1850 truncate_pagecache(inode, to);
355da1eb 1851
be655596 1852 spin_lock(&ci->i_ceph_lock);
a85f50b6
YZ
1853 if (to == ci->i_truncate_size) {
1854 ci->i_truncate_pending = 0;
1855 finish = 1;
1856 }
be655596 1857 spin_unlock(&ci->i_ceph_lock);
a85f50b6
YZ
1858 if (!finish)
1859 goto retry;
355da1eb 1860
b0d7c223
YZ
1861 mutex_unlock(&ci->i_truncate_mutex);
1862
355da1eb
SW
1863 if (wrbuffer_refs == 0)
1864 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
a85f50b6
YZ
1865
1866 wake_up_all(&ci->i_cap_wq);
355da1eb
SW
1867}
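/*
 * Illustrative sketch (not kernel code): __ceph_do_pending_vmtruncate()
 * reads the target size under the lock, truncates the page cache with the
 * lock dropped, then only clears i_truncate_pending if the target did not
 * move in the meantime; otherwise it loops.  (The real function also
 * flushes dirty snapped pages first.)  The names below are hypothetical
 * stand-ins for that retry structure.
 */
#include <stdio.h>

struct trunc_state {
        long long truncate_size;        /* latest size requested by the MDS */
        int truncate_pending;           /* outstanding truncate operations */
};

static void truncate_pagecache_to(long long to)
{
        printf("truncate page cache to %lld\n", to);
}

static void do_pending_truncate(struct trunc_state *s)
{
        long long to;
        int finish;

        do {
                if (s->truncate_pending == 0)
                        return;
                to = s->truncate_size;          /* sampled under the lock */

                /* lock dropped while the page cache is truncated */
                truncate_pagecache_to(to);

                /* lock re-taken: did another truncate arrive meanwhile? */
                finish = (to == s->truncate_size);
                if (finish)
                        s->truncate_pending = 0;
        } while (!finish);
}

int main(void)
{
        struct trunc_state s = { .truncate_size = 4096, .truncate_pending = 1 };

        do_pending_truncate(&s);
        return 0;
}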
1868
355da1eb
SW
1869/*
1870 * symlinks
1871 */
355da1eb 1872static const struct inode_operations ceph_symlink_iops = {
6b255391 1873 .get_link = simple_get_link,
0b932672
YZ
1874 .setattr = ceph_setattr,
1875 .getattr = ceph_getattr,
0b932672 1876 .listxattr = ceph_listxattr,
355da1eb
SW
1877};
1878
a26fecca 1879int __ceph_setattr(struct inode *inode, struct iattr *attr)
355da1eb 1880{
355da1eb 1881 struct ceph_inode_info *ci = ceph_inode(inode);
355da1eb
SW
1882 const unsigned int ia_valid = attr->ia_valid;
1883 struct ceph_mds_request *req;
a26fecca 1884 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
f66fd9f0 1885 struct ceph_cap_flush *prealloc_cf;
355da1eb
SW
1886 int issued;
1887 int release = 0, dirtied = 0;
1888 int mask = 0;
1889 int err = 0;
fca65b4a 1890 int inode_dirty_flags = 0;
604d1b02 1891 bool lock_snap_rwsem = false;
355da1eb 1892
f66fd9f0
YZ
1893 prealloc_cf = ceph_alloc_cap_flush();
1894 if (!prealloc_cf)
1895 return -ENOMEM;
1896
355da1eb
SW
1897 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1898 USE_AUTH_MDS);
f66fd9f0
YZ
1899 if (IS_ERR(req)) {
1900 ceph_free_cap_flush(prealloc_cf);
355da1eb 1901 return PTR_ERR(req);
f66fd9f0 1902 }
355da1eb 1903
be655596 1904 spin_lock(&ci->i_ceph_lock);
355da1eb 1905 issued = __ceph_caps_issued(ci, NULL);
604d1b02
YZ
1906
1907 if (!ci->i_head_snapc &&
1908 (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
1909 lock_snap_rwsem = true;
1910 if (!down_read_trylock(&mdsc->snap_rwsem)) {
1911 spin_unlock(&ci->i_ceph_lock);
1912 down_read(&mdsc->snap_rwsem);
1913 spin_lock(&ci->i_ceph_lock);
1914 issued = __ceph_caps_issued(ci, NULL);
1915 }
1916 }
1917
355da1eb
SW
1918 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1919
1920 if (ia_valid & ATTR_UID) {
1921 dout("setattr %p uid %d -> %d\n", inode,
bd2bae6a
EB
1922 from_kuid(&init_user_ns, inode->i_uid),
1923 from_kuid(&init_user_ns, attr->ia_uid));
355da1eb
SW
1924 if (issued & CEPH_CAP_AUTH_EXCL) {
1925 inode->i_uid = attr->ia_uid;
1926 dirtied |= CEPH_CAP_AUTH_EXCL;
1927 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
ab871b90
EB
1928 !uid_eq(attr->ia_uid, inode->i_uid)) {
1929 req->r_args.setattr.uid = cpu_to_le32(
1930 from_kuid(&init_user_ns, attr->ia_uid));
355da1eb
SW
1931 mask |= CEPH_SETATTR_UID;
1932 release |= CEPH_CAP_AUTH_SHARED;
1933 }
1934 }
1935 if (ia_valid & ATTR_GID) {
1936 dout("setattr %p gid %d -> %d\n", inode,
bd2bae6a
EB
1937 from_kgid(&init_user_ns, inode->i_gid),
1938 from_kgid(&init_user_ns, attr->ia_gid));
355da1eb
SW
1939 if (issued & CEPH_CAP_AUTH_EXCL) {
1940 inode->i_gid = attr->ia_gid;
1941 dirtied |= CEPH_CAP_AUTH_EXCL;
1942 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
ab871b90
EB
1943 !gid_eq(attr->ia_gid, inode->i_gid)) {
1944 req->r_args.setattr.gid = cpu_to_le32(
1945 from_kgid(&init_user_ns, attr->ia_gid));
355da1eb
SW
1946 mask |= CEPH_SETATTR_GID;
1947 release |= CEPH_CAP_AUTH_SHARED;
1948 }
1949 }
1950 if (ia_valid & ATTR_MODE) {
1951 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1952 attr->ia_mode);
1953 if (issued & CEPH_CAP_AUTH_EXCL) {
1954 inode->i_mode = attr->ia_mode;
1955 dirtied |= CEPH_CAP_AUTH_EXCL;
1956 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1957 attr->ia_mode != inode->i_mode) {
7221fe4c 1958 inode->i_mode = attr->ia_mode;
355da1eb
SW
1959 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1960 mask |= CEPH_SETATTR_MODE;
1961 release |= CEPH_CAP_AUTH_SHARED;
1962 }
1963 }
1964
1965 if (ia_valid & ATTR_ATIME) {
1966 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1967 inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1968 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1969 if (issued & CEPH_CAP_FILE_EXCL) {
1970 ci->i_time_warp_seq++;
1971 inode->i_atime = attr->ia_atime;
1972 dirtied |= CEPH_CAP_FILE_EXCL;
1973 } else if ((issued & CEPH_CAP_FILE_WR) &&
1974 timespec_compare(&inode->i_atime,
1975 &attr->ia_atime) < 0) {
1976 inode->i_atime = attr->ia_atime;
1977 dirtied |= CEPH_CAP_FILE_WR;
1978 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1979 !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1980 ceph_encode_timespec(&req->r_args.setattr.atime,
1981 &attr->ia_atime);
1982 mask |= CEPH_SETATTR_ATIME;
1983 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1984 CEPH_CAP_FILE_WR;
1985 }
1986 }
1987 if (ia_valid & ATTR_MTIME) {
1988 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1989 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1990 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1991 if (issued & CEPH_CAP_FILE_EXCL) {
1992 ci->i_time_warp_seq++;
1993 inode->i_mtime = attr->ia_mtime;
1994 dirtied |= CEPH_CAP_FILE_EXCL;
1995 } else if ((issued & CEPH_CAP_FILE_WR) &&
1996 timespec_compare(&inode->i_mtime,
1997 &attr->ia_mtime) < 0) {
1998 inode->i_mtime = attr->ia_mtime;
1999 dirtied |= CEPH_CAP_FILE_WR;
2000 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2001 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
2002 ceph_encode_timespec(&req->r_args.setattr.mtime,
2003 &attr->ia_mtime);
2004 mask |= CEPH_SETATTR_MTIME;
2005 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
2006 CEPH_CAP_FILE_WR;
2007 }
2008 }
2009 if (ia_valid & ATTR_SIZE) {
2010 dout("setattr %p size %lld -> %lld\n", inode,
2011 inode->i_size, attr->ia_size);
355da1eb
SW
2012 if ((issued & CEPH_CAP_FILE_EXCL) &&
2013 attr->ia_size > inode->i_size) {
99c88e69 2014 i_size_write(inode, attr->ia_size);
224a7542 2015 inode->i_blocks = calc_inode_blocks(attr->ia_size);
355da1eb
SW
2016 inode->i_ctime = attr->ia_ctime;
2017 ci->i_reported_size = attr->ia_size;
2018 dirtied |= CEPH_CAP_FILE_EXCL;
2019 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2020 attr->ia_size != inode->i_size) {
2021 req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2022 req->r_args.setattr.old_size =
2023 cpu_to_le64(inode->i_size);
2024 mask |= CEPH_SETATTR_SIZE;
2025 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
2026 CEPH_CAP_FILE_WR;
2027 }
2028 }
2029
2030 /* these do nothing */
2031 if (ia_valid & ATTR_CTIME) {
2032 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2033 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2034 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
2035 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2036 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2037 only ? "ctime only" : "ignored");
2038 inode->i_ctime = attr->ia_ctime;
2039 if (only) {
2040 /*
2041 * if the kernel wants to dirty ctime but nothing else,
2042 * we need to choose a cap to dirty under, or do
2043 * an almost-no-op setattr
2044 */
2045 if (issued & CEPH_CAP_AUTH_EXCL)
2046 dirtied |= CEPH_CAP_AUTH_EXCL;
2047 else if (issued & CEPH_CAP_FILE_EXCL)
2048 dirtied |= CEPH_CAP_FILE_EXCL;
2049 else if (issued & CEPH_CAP_XATTR_EXCL)
2050 dirtied |= CEPH_CAP_XATTR_EXCL;
2051 else
2052 mask |= CEPH_SETATTR_CTIME;
2053 }
2054 }
2055 if (ia_valid & ATTR_FILE)
2056 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2057
2058 if (dirtied) {
f66fd9f0
YZ
2059 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2060 &prealloc_cf);
c2050a45 2061 inode->i_ctime = current_time(inode);
355da1eb
SW
2062 }
2063
2064 release &= issued;
be655596 2065 spin_unlock(&ci->i_ceph_lock);
604d1b02
YZ
2066 if (lock_snap_rwsem)
2067 up_read(&mdsc->snap_rwsem);
355da1eb 2068
fca65b4a
SW
2069 if (inode_dirty_flags)
2070 __mark_inode_dirty(inode, inode_dirty_flags);
2071
7221fe4c 2072
355da1eb 2073 if (mask) {
70b666c3
SW
2074 req->r_inode = inode;
2075 ihold(inode);
355da1eb
SW
2076 req->r_inode_drop = release;
2077 req->r_args.setattr.mask = cpu_to_le32(mask);
2078 req->r_num_caps = 1;
752c8bdc 2079 err = ceph_mdsc_do_request(mdsc, NULL, req);
355da1eb
SW
2080 }
2081 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2082 ceph_cap_string(dirtied), mask);
2083
355da1eb 2084 ceph_mdsc_put_request(req);
f66fd9f0 2085 ceph_free_cap_flush(prealloc_cf);
443204c7
YZ
2086
2087 if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2088 __ceph_do_pending_vmtruncate(inode);
2089
355da1eb
SW
2090 return err;
2091}
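/*
 * Illustrative sketch (not kernel code): for each attribute,
 * __ceph_setattr() either applies the change locally (when the relevant
 * EXCL cap is held, marking that cap dirty) or adds it to the mask of
 * changes sent to the MDS, releasing the SHARED cap so other clients see
 * the update.  The flag values and helper below are hypothetical
 * stand-ins for the uid case.
 */
#include <stdio.h>

#define CAP_AUTH_EXCL    0x1
#define CAP_AUTH_SHARED  0x2
#define SETATTR_UID      0x1

struct setattr_plan {
        int dirtied;    /* caps to mark dirty locally */
        int mask;       /* fields to send to the MDS */
        int release;    /* caps to release along with the request */
};

static void plan_chown(int issued, unsigned cur_uid, unsigned new_uid,
                       struct setattr_plan *p)
{
        if (issued & CAP_AUTH_EXCL) {
                /* exclusive auth cap: change takes effect locally */
                p->dirtied |= CAP_AUTH_EXCL;
        } else if (!(issued & CAP_AUTH_SHARED) || new_uid != cur_uid) {
                /* otherwise ask the MDS, dropping our shared auth cap */
                p->mask |= SETATTR_UID;
                p->release |= CAP_AUTH_SHARED;
        }
}

int main(void)
{
        struct setattr_plan p = { 0, 0, 0 };

        plan_chown(CAP_AUTH_SHARED, 1000, 1001, &p);
        printf("mask=%#x release=%#x dirtied=%#x\n",
               p.mask, p.release, p.dirtied);   /* goes to the MDS */
        return 0;
}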
2092
a26fecca
AG
2093/*
2094 * setattr
2095 */
2096int ceph_setattr(struct dentry *dentry, struct iattr *attr)
2097{
fd5472ed
JK
2098 struct inode *inode = d_inode(dentry);
2099 int err;
2100
2101 if (ceph_snap(inode) != CEPH_NOSNAP)
2102 return -EROFS;
2103
31051c85 2104 err = setattr_prepare(dentry, attr);
fd5472ed
JK
2105 if (err != 0)
2106 return err;
2107
443204c7
YZ
2108 err = __ceph_setattr(inode, attr);
2109
2110 if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2111 err = posix_acl_chmod(inode, attr->ia_mode);
2112
2113 return err;
a26fecca
AG
2114}
2115
355da1eb
SW
2116/*
2117 * Verify that we have a lease on the given mask. If not,
2118 * do a getattr against an mds.
2119 */
01deead0
YZ
2120int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2121 int mask, bool force)
355da1eb 2122{
3d14c5d2
YS
2123 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2124 struct ceph_mds_client *mdsc = fsc->mdsc;
355da1eb
SW
2125 struct ceph_mds_request *req;
2126 int err;
2127
2128 if (ceph_snap(inode) == CEPH_SNAPDIR) {
2129 dout("do_getattr inode %p SNAPDIR\n", inode);
2130 return 0;
2131 }
2132
01deead0
YZ
2133 dout("do_getattr inode %p mask %s mode 0%o\n",
2134 inode, ceph_cap_string(mask), inode->i_mode);
508b32d8 2135 if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
355da1eb
SW
2136 return 0;
2137
2138 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
2139 if (IS_ERR(req))
2140 return PTR_ERR(req);
70b666c3
SW
2141 req->r_inode = inode;
2142 ihold(inode);
355da1eb
SW
2143 req->r_num_caps = 1;
2144 req->r_args.getattr.mask = cpu_to_le32(mask);
01deead0 2145 req->r_locked_page = locked_page;
355da1eb 2146 err = ceph_mdsc_do_request(mdsc, NULL, req);
01deead0
YZ
2147 if (locked_page && err == 0) {
2148 u64 inline_version = req->r_reply_info.targeti.inline_version;
2149 if (inline_version == 0) {
2150 /* the reply is supposed to contain inline data */
2151 err = -EINVAL;
2152 } else if (inline_version == CEPH_INLINE_NONE) {
2153 err = -ENODATA;
2154 } else {
2155 err = req->r_reply_info.targeti.inline_len;
2156 }
2157 }
355da1eb
SW
2158 ceph_mdsc_put_request(req);
2159 dout("do_getattr result=%d\n", err);
2160 return err;
2161}
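/*
 * Illustrative sketch (not kernel code): when a locked_page is passed,
 * __ceph_do_getattr() folds the inline-data outcome into its return
 * value: a negative errno on failure (-ENODATA when the file simply has
 * no inline data), or the inline data length on success.  The caller
 * below and its names are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

static const char *describe_getattr_result(int ret)
{
        if (ret == -ENODATA)
                return "no inline data, read via the OSDs";
        if (ret < 0)
                return "getattr failed";
        return "inline data returned in the locked page";
}

int main(void)
{
        printf("%s\n", describe_getattr_result(-ENODATA));
        printf("%s\n", describe_getattr_result(512));   /* 512 bytes inline */
        return 0;
}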
2162
2163
2164/*
2165 * Check inode permissions. We verify we have a valid value for
2166 * the AUTH cap, then call the generic handler.
2167 */
10556cb2 2168int ceph_permission(struct inode *inode, int mask)
355da1eb 2169{
b74c79e9
NP
2170 int err;
2171
10556cb2 2172 if (mask & MAY_NOT_BLOCK)
b74c79e9
NP
2173 return -ECHILD;
2174
508b32d8 2175 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
355da1eb
SW
2176
2177 if (!err)
2830ba7f 2178 err = generic_permission(inode, mask);
355da1eb
SW
2179 return err;
2180}
2181
2182/*
2183 * Get all attributes. Hopefully someday we'll have a statlite()
2184 * and can limit the fields we require to be accurate.
2185 */
2186int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
2187 struct kstat *stat)
2188{
2b0143b5 2189 struct inode *inode = d_inode(dentry);
232d4b01 2190 struct ceph_inode_info *ci = ceph_inode(inode);
355da1eb
SW
2191 int err;
2192
508b32d8 2193 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
355da1eb
SW
2194 if (!err) {
2195 generic_fillattr(inode, stat);
ad1fee96 2196 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
355da1eb
SW
2197 if (ceph_snap(inode) != CEPH_NOSNAP)
2198 stat->dev = ceph_snap(inode);
2199 else
2200 stat->dev = 0;
232d4b01 2201 if (S_ISDIR(inode->i_mode)) {
1c1266bb
YS
2202 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2203 RBYTES))
2204 stat->size = ci->i_rbytes;
2205 else
2206 stat->size = ci->i_files + ci->i_subdirs;
232d4b01 2207 stat->blocks = 0;
355da1eb 2208 stat->blksize = 65536;
232d4b01 2209 }
355da1eb
SW
2210 }
2211 return err;
2212}
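/*
 * Illustrative sketch (not kernel code): for directories, st_size is
 * either the recursive byte count (rbytes) when the "rbytes" mount option
 * is set, or the number of entries (files + subdirs) otherwise, with
 * st_blksize reported as 64 KB.  The helper below is a hypothetical
 * stand-in for that choice.
 */
#include <stdio.h>

static long long dir_stat_size(int rbytes_opt, long long rbytes,
                               long long files, long long subdirs)
{
        return rbytes_opt ? rbytes : files + subdirs;
}

int main(void)
{
        printf("size=%lld\n", dir_stat_size(0, 1 << 20, 10, 2)); /* 12 */
        printf("size=%lld\n", dir_stat_size(1, 1 << 20, 10, 2)); /* 1048576 */
        return 0;
}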