3d14c5d2 1#include <linux/ceph/ceph_debug.h>
3#include <linux/module.h>
4#include <linux/fs.h>
5#include <linux/slab.h>
6#include <linux/string.h>
7#include <linux/uaccess.h>
8#include <linux/kernel.h>
9#include <linux/namei.h>
10#include <linux/writeback.h>
11#include <linux/vmalloc.h>
4db658ea 12#include <linux/posix_acl.h>
14#include "super.h"
3d14c5d2 15#include "mds_client.h"
99ccbd22 16#include "cache.h"
3d14c5d2 17#include <linux/ceph/decode.h>
19/*
20 * Ceph inode operations
21 *
22 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
23 * setattr, etc.), xattr helpers, and helpers for assimilating
24 * metadata returned by the MDS into our cache.
25 *
26 * Also define helpers for doing asynchronous writeback, invalidation,
27 * and truncation for the benefit of those who can't afford to block
28 * (typically because they are in the message handler path).
29 */
30
31static const struct inode_operations ceph_symlink_iops;
32
33static void ceph_invalidate_work(struct work_struct *work);
34static void ceph_writeback_work(struct work_struct *work);
35static void ceph_vmtruncate_work(struct work_struct *work);
37/*
38 * find or create an inode, given the ceph ino number
39 */
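/*
 * iget5_locked() "set" callback: stash the ceph vino in the ceph inode
 * and derive the VFS inode number from it.
 */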
40static int ceph_set_ino_cb(struct inode *inode, void *data)
41{
42 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
43 inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
44 return 0;
45}
46
47struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
48{
49 struct inode *inode;
50 ino_t t = ceph_vino_to_ino(vino);
51
52 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
53 if (inode == NULL)
54 return ERR_PTR(-ENOMEM);
55 if (inode->i_state & I_NEW) {
56 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
57 inode, ceph_vinop(inode), (u64)inode->i_ino);
58 unlock_new_inode(inode);
59 }
60
61 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
62 vino.snap, inode);
63 return inode;
64}
65
66/*
 67 * get/construct snapdir inode for a given directory
68 */
69struct inode *ceph_get_snapdir(struct inode *parent)
70{
71 struct ceph_vino vino = {
72 .ino = ceph_ino(parent),
73 .snap = CEPH_SNAPDIR,
74 };
75 struct inode *inode = ceph_get_inode(parent->i_sb, vino);
b377ff13 76 struct ceph_inode_info *ci = ceph_inode(inode);
355da1eb
SW
77
78 BUG_ON(!S_ISDIR(parent->i_mode));
79 if (IS_ERR(inode))
7e34bc52 80 return inode;
355da1eb
SW
81 inode->i_mode = parent->i_mode;
82 inode->i_uid = parent->i_uid;
83 inode->i_gid = parent->i_gid;
84 inode->i_op = &ceph_dir_iops;
85 inode->i_fop = &ceph_dir_fops;
b377ff13
SW
86 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
87 ci->i_rbytes = 0;
355da1eb
SW
88 return inode;
89}
90
91const struct inode_operations ceph_file_iops = {
92 .permission = ceph_permission,
93 .setattr = ceph_setattr,
94 .getattr = ceph_getattr,
95 .setxattr = ceph_setxattr,
96 .getxattr = ceph_getxattr,
97 .listxattr = ceph_listxattr,
98 .removexattr = ceph_removexattr,
7221fe4c 99 .get_acl = ceph_get_acl,
72466d0b 100 .set_acl = ceph_set_acl,
101};
102
103
104/*
105 * We use a 'frag tree' to keep track of the MDS's directory fragments
106 * for a given inode (usually there is just a single fragment). We
107 * need to know when a child frag is delegated to a new MDS, or when
108 * it is flagged as replicated, so we can direct our requests
109 * accordingly.
110 */
111
112/*
113 * find/create a frag in the tree
114 */
115static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
116 u32 f)
117{
118 struct rb_node **p;
119 struct rb_node *parent = NULL;
120 struct ceph_inode_frag *frag;
121 int c;
122
123 p = &ci->i_fragtree.rb_node;
124 while (*p) {
125 parent = *p;
126 frag = rb_entry(parent, struct ceph_inode_frag, node);
127 c = ceph_frag_compare(f, frag->frag);
128 if (c < 0)
129 p = &(*p)->rb_left;
130 else if (c > 0)
131 p = &(*p)->rb_right;
132 else
133 return frag;
134 }
135
136 frag = kmalloc(sizeof(*frag), GFP_NOFS);
137 if (!frag) {
138 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
139 "frag %x\n", &ci->vfs_inode,
140 ceph_vinop(&ci->vfs_inode), f);
141 return ERR_PTR(-ENOMEM);
142 }
143 frag->frag = f;
144 frag->split_by = 0;
145 frag->mds = -1;
146 frag->ndist = 0;
147
148 rb_link_node(&frag->node, parent, p);
149 rb_insert_color(&frag->node, &ci->i_fragtree);
150
151 dout("get_or_create_frag added %llx.%llx frag %x\n",
152 ceph_vinop(&ci->vfs_inode), f);
153 return frag;
154}
155
156/*
157 * find a specific frag @f
158 */
159struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
160{
161 struct rb_node *n = ci->i_fragtree.rb_node;
162
163 while (n) {
164 struct ceph_inode_frag *frag =
165 rb_entry(n, struct ceph_inode_frag, node);
166 int c = ceph_frag_compare(f, frag->frag);
167 if (c < 0)
168 n = n->rb_left;
169 else if (c > 0)
170 n = n->rb_right;
171 else
172 return frag;
173 }
174 return NULL;
175}
176
177/*
178 * Choose frag containing the given value @v. If @pfrag is
179 * specified, copy the frag delegation info to the caller if
180 * it is present.
181 */
182u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
183 struct ceph_inode_frag *pfrag,
184 int *found)
185{
186 u32 t = ceph_frag_make(0, 0);
187 struct ceph_inode_frag *frag;
188 unsigned nway, i;
189 u32 n;
190
191 if (found)
192 *found = 0;
193
194 mutex_lock(&ci->i_fragtree_mutex);
195 while (1) {
196 WARN_ON(!ceph_frag_contains_value(t, v));
197 frag = __ceph_find_frag(ci, t);
198 if (!frag)
199 break; /* t is a leaf */
200 if (frag->split_by == 0) {
201 if (pfrag)
202 memcpy(pfrag, frag, sizeof(*pfrag));
203 if (found)
204 *found = 1;
205 break;
206 }
207
208 /* choose child */
209 nway = 1 << frag->split_by;
210 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
211 frag->split_by, nway);
212 for (i = 0; i < nway; i++) {
213 n = ceph_frag_make_child(t, frag->split_by, i);
214 if (ceph_frag_contains_value(n, v)) {
215 t = n;
216 break;
217 }
218 }
219 BUG_ON(i == nway);
220 }
221 dout("choose_frag(%x) = %x\n", v, t);
222
223 mutex_unlock(&ci->i_fragtree_mutex);
224 return t;
225}
226
227/*
228 * Process dirfrag (delegation) info from the mds. Include leaf
229 * fragment in tree ONLY if ndist > 0. Otherwise, only
 230 * branches/splits are included in i_fragtree.
231 */
232static int ceph_fill_dirfrag(struct inode *inode,
233 struct ceph_mds_reply_dirfrag *dirinfo)
234{
235 struct ceph_inode_info *ci = ceph_inode(inode);
236 struct ceph_inode_frag *frag;
237 u32 id = le32_to_cpu(dirinfo->frag);
238 int mds = le32_to_cpu(dirinfo->auth);
239 int ndist = le32_to_cpu(dirinfo->ndist);
240 int i;
241 int err = 0;
242
243 mutex_lock(&ci->i_fragtree_mutex);
244 if (ndist == 0) {
245 /* no delegation info needed. */
246 frag = __ceph_find_frag(ci, id);
247 if (!frag)
248 goto out;
249 if (frag->split_by == 0) {
250 /* tree leaf, remove */
251 dout("fill_dirfrag removed %llx.%llx frag %x"
252 " (no ref)\n", ceph_vinop(inode), id);
253 rb_erase(&frag->node, &ci->i_fragtree);
254 kfree(frag);
255 } else {
256 /* tree branch, keep and clear */
257 dout("fill_dirfrag cleared %llx.%llx frag %x"
258 " referral\n", ceph_vinop(inode), id);
259 frag->mds = -1;
260 frag->ndist = 0;
261 }
262 goto out;
263 }
264
265
266 /* find/add this frag to store mds delegation info */
267 frag = __get_or_create_frag(ci, id);
268 if (IS_ERR(frag)) {
269 /* this is not the end of the world; we can continue
270 with bad/inaccurate delegation info */
271 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
272 ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
273 err = -ENOMEM;
274 goto out;
275 }
276
277 frag->mds = mds;
278 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
279 for (i = 0; i < frag->ndist; i++)
280 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
281 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
282 ceph_vinop(inode), frag->frag, frag->ndist);
283
284out:
285 mutex_unlock(&ci->i_fragtree_mutex);
286 return err;
287}
288
289
290/*
291 * initialize a newly allocated inode.
292 */
293struct inode *ceph_alloc_inode(struct super_block *sb)
294{
295 struct ceph_inode_info *ci;
296 int i;
297
298 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
299 if (!ci)
300 return NULL;
301
302 dout("alloc_inode %p\n", &ci->vfs_inode);
303
be655596
SW
304 spin_lock_init(&ci->i_ceph_lock);
305
355da1eb
SW
306 ci->i_version = 0;
307 ci->i_time_warp_seq = 0;
308 ci->i_ceph_flags = 0;
2f276c51
YZ
309 atomic_set(&ci->i_release_count, 1);
310 atomic_set(&ci->i_complete_count, 0);
355da1eb
SW
311 ci->i_symlink = NULL;
312
6c0f3af7
SW
313 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
314
355da1eb
SW
315 ci->i_fragtree = RB_ROOT;
316 mutex_init(&ci->i_fragtree_mutex);
317
318 ci->i_xattrs.blob = NULL;
319 ci->i_xattrs.prealloc_blob = NULL;
320 ci->i_xattrs.dirty = false;
321 ci->i_xattrs.index = RB_ROOT;
322 ci->i_xattrs.count = 0;
323 ci->i_xattrs.names_size = 0;
324 ci->i_xattrs.vals_size = 0;
325 ci->i_xattrs.version = 0;
326 ci->i_xattrs.index_version = 0;
327
328 ci->i_caps = RB_ROOT;
329 ci->i_auth_cap = NULL;
330 ci->i_dirty_caps = 0;
331 ci->i_flushing_caps = 0;
332 INIT_LIST_HEAD(&ci->i_dirty_item);
333 INIT_LIST_HEAD(&ci->i_flushing_item);
334 ci->i_cap_flush_seq = 0;
335 ci->i_cap_flush_last_tid = 0;
336 memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
337 init_waitqueue_head(&ci->i_cap_wq);
338 ci->i_hold_caps_min = 0;
339 ci->i_hold_caps_max = 0;
340 INIT_LIST_HEAD(&ci->i_cap_delay_list);
355da1eb
SW
341 INIT_LIST_HEAD(&ci->i_cap_snaps);
342 ci->i_head_snapc = NULL;
343 ci->i_snap_caps = 0;
11df2dfb 344 ci->i_cap_exporting_issued = 0;
355da1eb
SW
345
346 for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
347 ci->i_nr_by_mode[i] = 0;
348
b0d7c223 349 mutex_init(&ci->i_truncate_mutex);
355da1eb
SW
350 ci->i_truncate_seq = 0;
351 ci->i_truncate_size = 0;
352 ci->i_truncate_pending = 0;
353
354 ci->i_max_size = 0;
355 ci->i_reported_size = 0;
356 ci->i_wanted_max_size = 0;
357 ci->i_requested_max_size = 0;
358
359 ci->i_pin_ref = 0;
360 ci->i_rd_ref = 0;
361 ci->i_rdcache_ref = 0;
362 ci->i_wr_ref = 0;
d3d0720d 363 ci->i_wb_ref = 0;
355da1eb
SW
364 ci->i_wrbuffer_ref = 0;
365 ci->i_wrbuffer_ref_head = 0;
366 ci->i_shared_gen = 0;
367 ci->i_rdcache_gen = 0;
368 ci->i_rdcache_revoking = 0;
369
370 INIT_LIST_HEAD(&ci->i_unsafe_writes);
371 INIT_LIST_HEAD(&ci->i_unsafe_dirops);
372 spin_lock_init(&ci->i_unsafe_lock);
373
374 ci->i_snap_realm = NULL;
375 INIT_LIST_HEAD(&ci->i_snap_realm_item);
376 INIT_LIST_HEAD(&ci->i_snap_flush_item);
377
3c6f6b79
SW
378 INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
379 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
355da1eb
SW
380
381 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
382
99ccbd22
MT
383 ceph_fscache_inode_init(ci);
384
355da1eb
SW
385 return &ci->vfs_inode;
386}
387
fa0d7e3d
NP
388static void ceph_i_callback(struct rcu_head *head)
389{
390 struct inode *inode = container_of(head, struct inode, i_rcu);
391 struct ceph_inode_info *ci = ceph_inode(inode);
392
fa0d7e3d
NP
393 kmem_cache_free(ceph_inode_cachep, ci);
394}
395
355da1eb
SW
396void ceph_destroy_inode(struct inode *inode)
397{
398 struct ceph_inode_info *ci = ceph_inode(inode);
399 struct ceph_inode_frag *frag;
400 struct rb_node *n;
401
402 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
403
99ccbd22
MT
404 ceph_fscache_unregister_inode_cookie(ci);
405
355da1eb
SW
406 ceph_queue_caps_release(inode);
407
8b218b8a
SW
408 /*
409 * we may still have a snap_realm reference if there are stray
410 * caps in i_cap_exporting_issued or i_snap_caps.
411 */
412 if (ci->i_snap_realm) {
413 struct ceph_mds_client *mdsc =
3d14c5d2 414 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
8b218b8a
SW
415 struct ceph_snap_realm *realm = ci->i_snap_realm;
416
417 dout(" dropping residual ref to snap realm %p\n", realm);
418 spin_lock(&realm->inodes_with_caps_lock);
419 list_del_init(&ci->i_snap_realm_item);
420 spin_unlock(&realm->inodes_with_caps_lock);
421 ceph_put_snap_realm(mdsc, realm);
422 }
423
355da1eb
SW
424 kfree(ci->i_symlink);
425 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
426 frag = rb_entry(n, struct ceph_inode_frag, node);
427 rb_erase(n, &ci->i_fragtree);
428 kfree(frag);
429 }
430
431 __ceph_destroy_xattrs(ci);
b6c1d5b8
SW
432 if (ci->i_xattrs.blob)
433 ceph_buffer_put(ci->i_xattrs.blob);
434 if (ci->i_xattrs.prealloc_blob)
435 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
355da1eb 436
fa0d7e3d 437 call_rcu(&inode->i_rcu, ceph_i_callback);
355da1eb
SW
438}
439
9f12bd11
YZ
440int ceph_drop_inode(struct inode *inode)
441{
442 /*
 443 * A positive dentry and its corresponding inode always appear together
 444 * in the MDS reply, so there is no need to keep the inode in the cache
 445 * after dropping all its aliases.
446 */
447 return 1;
448}
449
355da1eb
SW
450/*
451 * Helpers to fill in size, ctime, mtime, and atime. We have to be
 452 * careful because either the client or the MDS may have more up-to-date
453 * info, depending on which capabilities are held, and whether
454 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
455 * and size are monotonically increasing, except when utimes() or
456 * truncate() increments the corresponding _seq values.)
457 */
458int ceph_fill_file_size(struct inode *inode, int issued,
459 u32 truncate_seq, u64 truncate_size, u64 size)
460{
461 struct ceph_inode_info *ci = ceph_inode(inode);
462 int queue_trunc = 0;
463
464 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
465 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
466 dout("size %lld -> %llu\n", inode->i_size, size);
467 inode->i_size = size;
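		/* i_blocks counts 512-byte sectors; round the new size up */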
468 inode->i_blocks = (size + (1<<9) - 1) >> 9;
469 ci->i_reported_size = size;
470 if (truncate_seq != ci->i_truncate_seq) {
471 dout("truncate_seq %u -> %u\n",
472 ci->i_truncate_seq, truncate_seq);
473 ci->i_truncate_seq = truncate_seq;
b0d7c223
YZ
474
475 /* the MDS should have revoked these caps */
476 WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
477 CEPH_CAP_FILE_RD |
478 CEPH_CAP_FILE_WR |
479 CEPH_CAP_FILE_LAZYIO));
3d497d85
YS
480 /*
481 * If we hold relevant caps, or in the case where we're
482 * not the only client referencing this file and we
483 * don't hold those caps, then we need to check whether
484 * the file is either opened or mmaped
485 */
b0d7c223
YZ
486 if ((issued & (CEPH_CAP_FILE_CACHE|
487 CEPH_CAP_FILE_BUFFER)) ||
488 mapping_mapped(inode->i_mapping) ||
489 __ceph_caps_file_wanted(ci)) {
355da1eb
SW
490 ci->i_truncate_pending++;
491 queue_trunc = 1;
492 }
493 }
494 }
495 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
496 ci->i_truncate_size != truncate_size) {
497 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
498 truncate_size);
499 ci->i_truncate_size = truncate_size;
500 }
99ccbd22
MT
501
502 if (queue_trunc)
503 ceph_fscache_invalidate(inode);
504
355da1eb
SW
505 return queue_trunc;
506}
507
508void ceph_fill_file_time(struct inode *inode, int issued,
509 u64 time_warp_seq, struct timespec *ctime,
510 struct timespec *mtime, struct timespec *atime)
511{
512 struct ceph_inode_info *ci = ceph_inode(inode);
513 int warn = 0;
514
515 if (issued & (CEPH_CAP_FILE_EXCL|
516 CEPH_CAP_FILE_WR|
517 CEPH_CAP_FILE_BUFFER|
518 CEPH_CAP_AUTH_EXCL|
519 CEPH_CAP_XATTR_EXCL)) {
355da1eb
SW
520 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
521 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
522 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
523 ctime->tv_sec, ctime->tv_nsec);
524 inode->i_ctime = *ctime;
525 }
526 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
527 /* the MDS did a utimes() */
528 dout("mtime %ld.%09ld -> %ld.%09ld "
529 "tw %d -> %d\n",
530 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
531 mtime->tv_sec, mtime->tv_nsec,
532 ci->i_time_warp_seq, (int)time_warp_seq);
533
534 inode->i_mtime = *mtime;
535 inode->i_atime = *atime;
536 ci->i_time_warp_seq = time_warp_seq;
537 } else if (time_warp_seq == ci->i_time_warp_seq) {
538 /* nobody did utimes(); take the max */
539 if (timespec_compare(mtime, &inode->i_mtime) > 0) {
540 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
541 inode->i_mtime.tv_sec,
542 inode->i_mtime.tv_nsec,
543 mtime->tv_sec, mtime->tv_nsec);
544 inode->i_mtime = *mtime;
545 }
546 if (timespec_compare(atime, &inode->i_atime) > 0) {
547 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
548 inode->i_atime.tv_sec,
549 inode->i_atime.tv_nsec,
550 atime->tv_sec, atime->tv_nsec);
551 inode->i_atime = *atime;
552 }
553 } else if (issued & CEPH_CAP_FILE_EXCL) {
554 /* we did a utimes(); ignore mds values */
555 } else {
556 warn = 1;
557 }
558 } else {
d8672d64 559 /* we have no write|excl caps; whatever the MDS says is true */
355da1eb
SW
560 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
561 inode->i_ctime = *ctime;
562 inode->i_mtime = *mtime;
563 inode->i_atime = *atime;
564 ci->i_time_warp_seq = time_warp_seq;
565 } else {
566 warn = 1;
567 }
568 }
569 if (warn) /* time_warp_seq shouldn't go backwards */
570 dout("%p mds time_warp_seq %llu < %u\n",
571 inode, time_warp_seq, ci->i_time_warp_seq);
572}
573
574/*
575 * Populate an inode based on info from mds. May be called on new or
576 * existing inodes.
577 */
578static int fill_inode(struct inode *inode,
579 struct ceph_mds_reply_info_in *iinfo,
580 struct ceph_mds_reply_dirfrag *dirinfo,
581 struct ceph_mds_session *session,
582 unsigned long ttl_from, int cap_fmode,
583 struct ceph_cap_reservation *caps_reservation)
584{
585 struct ceph_mds_reply_inode *info = iinfo->in;
586 struct ceph_inode_info *ci = ceph_inode(inode);
587 int i;
dfabbed6 588 int issued = 0, implemented;
355da1eb
SW
589 struct timespec mtime, atime, ctime;
590 u32 nsplits;
53e879a4
YZ
591 struct ceph_inode_frag *frag;
592 struct rb_node *rb_node;
355da1eb
SW
593 struct ceph_buffer *xattr_blob = NULL;
594 int err = 0;
595 int queue_trunc = 0;
596
597 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
598 inode, ceph_vinop(inode), le64_to_cpu(info->version),
599 ci->i_version);
600
601 /*
602 * prealloc xattr data, if it looks like we'll need it. only
603 * if len > 4 (meaning there are actually xattrs; the first 4
604 * bytes are the xattr count).
605 */
606 if (iinfo->xattr_len > 4) {
b6c1d5b8 607 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
355da1eb
SW
608 if (!xattr_blob)
609 pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
610 iinfo->xattr_len);
611 }
612
be655596 613 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
614
615 /*
616 * provided version will be odd if inode value is projected,
617 * even if stable. skip the update if we have newer stable
618 * info (ours>=theirs, e.g. due to racing mds replies), unless
619 * we are getting projected (unstable) info (in which case the
620 * version is odd, and we want ours>theirs).
621 * us them
622 * 2 2 skip
623 * 3 2 skip
624 * 3 3 update
625 */
626 if (le64_to_cpu(info->version) > 0 &&
8bd59e01 627 (ci->i_version & ~1) >= le64_to_cpu(info->version))
355da1eb 628 goto no_change;
dfabbed6 629
355da1eb
SW
630 issued = __ceph_caps_issued(ci, &implemented);
631 issued |= implemented | __ceph_caps_dirty(ci);
632
633 /* update inode */
634 ci->i_version = le64_to_cpu(info->version);
635 inode->i_version++;
636 inode->i_rdev = le32_to_cpu(info->rdev);
637
638 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
639 inode->i_mode = le32_to_cpu(info->mode);
ab871b90
EB
640 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
641 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
355da1eb 642 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
643 from_kuid(&init_user_ns, inode->i_uid),
644 from_kgid(&init_user_ns, inode->i_gid));
355da1eb
SW
645 }
646
647 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
bfe86848 648 set_nlink(inode, le32_to_cpu(info->nlink));
355da1eb
SW
649
650 /* be careful with mtime, atime, size */
651 ceph_decode_timespec(&atime, &info->atime);
652 ceph_decode_timespec(&mtime, &info->mtime);
653 ceph_decode_timespec(&ctime, &info->ctime);
654 queue_trunc = ceph_fill_file_size(inode, issued,
655 le32_to_cpu(info->truncate_seq),
656 le64_to_cpu(info->truncate_size),
657 le64_to_cpu(info->size));
658 ceph_fill_file_time(inode, issued,
659 le32_to_cpu(info->time_warp_seq),
660 &ctime, &mtime, &atime);
661
912a9b03
SW
662 /* only update max_size on auth cap */
663 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
664 ci->i_max_size != le64_to_cpu(info->max_size)) {
665 dout("max_size %lld -> %llu\n", ci->i_max_size,
666 le64_to_cpu(info->max_size));
667 ci->i_max_size = le64_to_cpu(info->max_size);
668 }
669
355da1eb
SW
670 ci->i_layout = info->layout;
671 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
672
673 /* xattrs */
674 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
675 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
676 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
677 if (ci->i_xattrs.blob)
678 ceph_buffer_put(ci->i_xattrs.blob);
679 ci->i_xattrs.blob = xattr_blob;
680 if (xattr_blob)
681 memcpy(ci->i_xattrs.blob->vec.iov_base,
682 iinfo->xattr_data, iinfo->xattr_len);
683 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
7221fe4c 684 ceph_forget_all_cached_acls(inode);
a6424e48 685 xattr_blob = NULL;
355da1eb
SW
686 }
687
688 inode->i_mapping->a_ops = &ceph_aops;
689 inode->i_mapping->backing_dev_info =
640ef79d 690 &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
355da1eb
SW
691
692 switch (inode->i_mode & S_IFMT) {
693 case S_IFIFO:
694 case S_IFBLK:
695 case S_IFCHR:
696 case S_IFSOCK:
697 init_special_inode(inode, inode->i_mode, inode->i_rdev);
698 inode->i_op = &ceph_file_iops;
699 break;
700 case S_IFREG:
701 inode->i_op = &ceph_file_iops;
702 inode->i_fop = &ceph_file_fops;
703 break;
704 case S_IFLNK:
705 inode->i_op = &ceph_symlink_iops;
706 if (!ci->i_symlink) {
810339ec 707 u32 symlen = iinfo->symlink_len;
355da1eb
SW
708 char *sym;
709
be655596 710 spin_unlock(&ci->i_ceph_lock);
355da1eb 711
810339ec
XW
712 err = -EINVAL;
713 if (WARN_ON(symlen != inode->i_size))
714 goto out;
715
355da1eb 716 err = -ENOMEM;
810339ec 717 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
355da1eb
SW
718 if (!sym)
719 goto out;
355da1eb 720
be655596 721 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
722 if (!ci->i_symlink)
723 ci->i_symlink = sym;
724 else
725 kfree(sym); /* lost a race */
726 }
727 break;
728 case S_IFDIR:
729 inode->i_op = &ceph_dir_iops;
730 inode->i_fop = &ceph_dir_fops;
731
14303d20
SW
732 ci->i_dir_layout = iinfo->dir_layout;
733
355da1eb
SW
734 ci->i_files = le64_to_cpu(info->files);
735 ci->i_subdirs = le64_to_cpu(info->subdirs);
736 ci->i_rbytes = le64_to_cpu(info->rbytes);
737 ci->i_rfiles = le64_to_cpu(info->rfiles);
738 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
739 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
355da1eb
SW
740 break;
741 default:
742 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
743 ceph_vinop(inode), inode->i_mode);
744 }
745
a8673d61
YZ
746 /* set dir completion flag? */
747 if (S_ISDIR(inode->i_mode) &&
748 ci->i_files == 0 && ci->i_subdirs == 0 &&
749 ceph_snap(inode) == CEPH_NOSNAP &&
750 (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
751 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
2f276c51 752 !__ceph_dir_is_complete(ci)) {
a8673d61 753 dout(" marking %p complete (empty)\n", inode);
2f276c51 754 __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
a8673d61
YZ
755 ci->i_max_offset = 2;
756 }
355da1eb 757no_change:
be655596 758 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
759
760 /* queue truncate if we saw i_size decrease */
761 if (queue_trunc)
3c6f6b79 762 ceph_queue_vmtruncate(inode);
355da1eb
SW
763
764 /* populate frag tree */
765 /* FIXME: move me up, if/when version reflects fragtree changes */
766 nsplits = le32_to_cpu(info->fragtree.nsplits);
767 mutex_lock(&ci->i_fragtree_mutex);
53e879a4 768 rb_node = rb_first(&ci->i_fragtree);
355da1eb
SW
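	/*
	 * Merge the MDS's split list into our fragtree: walk the existing
	 * rbtree in step with the splits, dropping local frags the MDS no
	 * longer reports and creating entries for any new splits.
	 */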
769 for (i = 0; i < nsplits; i++) {
770 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
53e879a4
YZ
771 frag = NULL;
772 while (rb_node) {
773 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
774 if (ceph_frag_compare(frag->frag, id) >= 0) {
775 if (frag->frag != id)
776 frag = NULL;
777 else
778 rb_node = rb_next(rb_node);
779 break;
780 }
781 rb_node = rb_next(rb_node);
782 rb_erase(&frag->node, &ci->i_fragtree);
783 kfree(frag);
784 frag = NULL;
785 }
786 if (!frag) {
787 frag = __get_or_create_frag(ci, id);
788 if (IS_ERR(frag))
789 continue;
790 }
355da1eb
SW
791 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
792 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
793 }
53e879a4
YZ
794 while (rb_node) {
795 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
796 rb_node = rb_next(rb_node);
797 rb_erase(&frag->node, &ci->i_fragtree);
798 kfree(frag);
799 }
355da1eb
SW
800 mutex_unlock(&ci->i_fragtree_mutex);
801
802 /* were we issued a capability? */
803 if (info->cap.caps) {
804 if (ceph_snap(inode) == CEPH_NOSNAP) {
805 ceph_add_cap(inode, session,
806 le64_to_cpu(info->cap.cap_id),
807 cap_fmode,
808 le32_to_cpu(info->cap.caps),
809 le32_to_cpu(info->cap.wanted),
810 le32_to_cpu(info->cap.seq),
811 le32_to_cpu(info->cap.mseq),
812 le64_to_cpu(info->cap.realm),
813 info->cap.flags,
814 caps_reservation);
815 } else {
be655596 816 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
817 dout(" %p got snap_caps %s\n", inode,
818 ceph_cap_string(le32_to_cpu(info->cap.caps)));
819 ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
820 if (cap_fmode >= 0)
821 __ceph_get_fmode(ci, cap_fmode);
be655596 822 spin_unlock(&ci->i_ceph_lock);
355da1eb 823 }
04d000eb
SW
824 } else if (cap_fmode >= 0) {
825 pr_warning("mds issued no caps on %llx.%llx\n",
826 ceph_vinop(inode));
827 __ceph_get_fmode(ci, cap_fmode);
355da1eb
SW
828 }
829
830 /* update delegation info? */
831 if (dirinfo)
832 ceph_fill_dirfrag(inode, dirinfo);
833
834 err = 0;
835
836out:
b6c1d5b8
SW
837 if (xattr_blob)
838 ceph_buffer_put(xattr_blob);
355da1eb
SW
839 return err;
840}
841
842/*
843 * caller should hold session s_mutex.
844 */
845static void update_dentry_lease(struct dentry *dentry,
846 struct ceph_mds_reply_lease *lease,
847 struct ceph_mds_session *session,
848 unsigned long from_time)
849{
850 struct ceph_dentry_info *di = ceph_dentry(dentry);
851 long unsigned duration = le32_to_cpu(lease->duration_ms);
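	/* convert the lease duration (ms) into jiffies-based expiry/renewal times */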
852 long unsigned ttl = from_time + (duration * HZ) / 1000;
853 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
854 struct inode *dir;
855
856 /* only track leases on regular dentries */
857 if (dentry->d_op != &ceph_dentry_ops)
858 return;
859
860 spin_lock(&dentry->d_lock);
2f90b852
SW
861 dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
862 dentry, duration, ttl);
355da1eb
SW
863
 864 /* make lease_shared_gen match directory */
865 dir = dentry->d_parent->d_inode;
866 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
867
2f90b852 868 if (duration == 0)
355da1eb
SW
869 goto out_unlock;
870
871 if (di->lease_gen == session->s_cap_gen &&
872 time_before(ttl, dentry->d_time))
873 goto out_unlock; /* we already have a newer lease. */
874
875 if (di->lease_session && di->lease_session != session)
876 goto out_unlock;
877
878 ceph_dentry_lru_touch(dentry);
879
880 if (!di->lease_session)
881 di->lease_session = ceph_get_mds_session(session);
882 di->lease_gen = session->s_cap_gen;
883 di->lease_seq = le32_to_cpu(lease->seq);
884 di->lease_renew_after = half_ttl;
885 di->lease_renew_from = 0;
886 dentry->d_time = ttl;
887out_unlock:
888 spin_unlock(&dentry->d_lock);
889 return;
890}
891
1cd3935b
SW
892/*
893 * Set dentry's directory position based on the current dir's max, and
894 * order it in d_subdirs, so that dcache_readdir behaves.
4f177264
SW
895 *
896 * Always called under directory's i_mutex.
1cd3935b
SW
897 */
898static void ceph_set_dentry_offset(struct dentry *dn)
899{
900 struct dentry *dir = dn->d_parent;
4f177264 901 struct inode *inode = dir->d_inode;
b8cd952b 902 struct ceph_inode_info *ci;
1cd3935b
SW
903 struct ceph_dentry_info *di;
904
905 BUG_ON(!inode);
906
b8cd952b 907 ci = ceph_inode(inode);
1cd3935b
SW
908 di = ceph_dentry(dn);
909
be655596 910 spin_lock(&ci->i_ceph_lock);
2f276c51 911 if (!__ceph_dir_is_complete(ci)) {
be655596 912 spin_unlock(&ci->i_ceph_lock);
1cd3935b
SW
913 return;
914 }
915 di->offset = ceph_inode(inode)->i_max_offset++;
be655596 916 spin_unlock(&ci->i_ceph_lock);
1cd3935b 917
2fd6b7f5
NP
918 spin_lock(&dir->d_lock);
919 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
13a4214c 920 list_move(&dn->d_u.d_child, &dir->d_subdirs);
1cd3935b
SW
921 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
922 dn->d_u.d_child.prev, dn->d_u.d_child.next);
923 spin_unlock(&dn->d_lock);
2fd6b7f5 924 spin_unlock(&dir->d_lock);
1cd3935b
SW
925}
926
355da1eb
SW
927/*
928 * splice a dentry to an inode.
929 * caller must hold directory i_mutex for this to be safe.
930 *
931 * we will only rehash the resulting dentry if @prehash is
932 * true; @prehash will be set to false (for the benefit of
933 * the caller) if we fail.
934 */
935static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
467c5251 936 bool *prehash, bool set_offset)
355da1eb
SW
937{
938 struct dentry *realdn;
939
1cd3935b
SW
940 BUG_ON(dn->d_inode);
941
355da1eb
SW
942 /* dn must be unhashed */
943 if (!d_unhashed(dn))
944 d_drop(dn);
945 realdn = d_materialise_unique(dn, in);
946 if (IS_ERR(realdn)) {
d69ed05a
SW
947 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
948 PTR_ERR(realdn), dn, in, ceph_vinop(in));
355da1eb
SW
949 if (prehash)
950 *prehash = false; /* don't rehash on error */
951 dn = realdn; /* note realdn contains the error */
952 goto out;
953 } else if (realdn) {
954 dout("dn %p (%d) spliced with %p (%d) "
955 "inode %p ino %llx.%llx\n",
84d08fa8
AV
956 dn, d_count(dn),
957 realdn, d_count(realdn),
355da1eb
SW
958 realdn->d_inode, ceph_vinop(realdn->d_inode));
959 dput(dn);
960 dn = realdn;
961 } else {
962 BUG_ON(!ceph_dentry(dn));
355da1eb
SW
963 dout("dn %p attached to %p ino %llx.%llx\n",
964 dn, dn->d_inode, ceph_vinop(dn->d_inode));
965 }
966 if ((!prehash || *prehash) && d_unhashed(dn))
967 d_rehash(dn);
467c5251
SW
968 if (set_offset)
969 ceph_set_dentry_offset(dn);
355da1eb
SW
970out:
971 return dn;
972}
973
974/*
975 * Incorporate results into the local cache. This is either just
976 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
977 * after a lookup).
978 *
979 * A reply may contain
980 * a directory inode along with a dentry.
981 * and/or a target inode
982 *
983 * Called with snap_rwsem (read).
984 */
985int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
986 struct ceph_mds_session *session)
987{
988 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
989 struct inode *in = NULL;
990 struct ceph_mds_reply_inode *ininfo;
991 struct ceph_vino vino;
3d14c5d2 992 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
355da1eb
SW
993 int err = 0;
994
995 dout("fill_trace %p is_dentry %d is_target %d\n", req,
996 rinfo->head->is_dentry, rinfo->head->is_target);
997
998#if 0
999 /*
1000 * Debugging hook:
1001 *
1002 * If we resend completed ops to a recovering mds, we get no
1003 * trace. Since that is very rare, pretend this is the case
1004 * to ensure the 'no trace' handlers in the callers behave.
1005 *
1006 * Fill in inodes unconditionally to avoid breaking cap
1007 * invariants.
1008 */
1009 if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
1010 pr_info("fill_trace faking empty trace on %lld %s\n",
1011 req->r_tid, ceph_mds_op_name(rinfo->head->op));
1012 if (rinfo->head->is_dentry) {
1013 rinfo->head->is_dentry = 0;
1014 err = fill_inode(req->r_locked_dir,
1015 &rinfo->diri, rinfo->dirfrag,
1016 session, req->r_request_started, -1);
1017 }
1018 if (rinfo->head->is_target) {
1019 rinfo->head->is_target = 0;
1020 ininfo = rinfo->targeti.in;
1021 vino.ino = le64_to_cpu(ininfo->ino);
1022 vino.snap = le64_to_cpu(ininfo->snapid);
1023 in = ceph_get_inode(sb, vino);
1024 err = fill_inode(in, &rinfo->targeti, NULL,
1025 session, req->r_request_started,
1026 req->r_fmode);
1027 iput(in);
1028 }
1029 }
1030#endif
1031
1032 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1033 dout("fill_trace reply is empty!\n");
167c9e35
SW
1034 if (rinfo->head->result == 0 && req->r_locked_dir)
1035 ceph_invalidate_dir_request(req);
355da1eb
SW
1036 return 0;
1037 }
1038
1039 if (rinfo->head->is_dentry) {
5b1daecd
SW
1040 struct inode *dir = req->r_locked_dir;
1041
6c5e50fa
SW
1042 if (dir) {
1043 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
1044 session, req->r_request_started, -1,
1045 &req->r_caps_reservation);
1046 if (err < 0)
1047 return err;
1048 } else {
1049 WARN_ON_ONCE(1);
1050 }
5b1daecd
SW
1051 }
1052
86b58d13
YZ
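	/* instantiate/update the target inode named by the reply and hang it off the request */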
1053 if (rinfo->head->is_target) {
1054 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1055 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1056
1057 in = ceph_get_inode(sb, vino);
1058 if (IS_ERR(in)) {
1059 err = PTR_ERR(in);
1060 goto done;
1061 }
1062 req->r_target_inode = in;
1063
1064 err = fill_inode(in, &rinfo->targeti, NULL,
1065 session, req->r_request_started,
1066 (le32_to_cpu(rinfo->head->result) == 0) ?
1067 req->r_fmode : -1,
1068 &req->r_caps_reservation);
1069 if (err < 0) {
1070 pr_err("fill_inode badness %p %llx.%llx\n",
1071 in, ceph_vinop(in));
1072 goto done;
1073 }
1074 }
1075
9358c6d4
SW
1076 /*
1077 * ignore null lease/binding on snapdir ENOENT, or else we
1078 * will have trouble splicing in the virtual snapdir later
1079 */
1080 if (rinfo->head->is_dentry && !req->r_aborted &&
6c5e50fa 1081 req->r_locked_dir &&
9358c6d4 1082 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
3d14c5d2 1083 fsc->mount_options->snapdir_name,
9358c6d4 1084 req->r_dentry->d_name.len))) {
355da1eb
SW
1085 /*
1086 * lookup link rename : null -> possibly existing inode
1087 * mknod symlink mkdir : null -> new inode
1088 * unlink : linked -> null
1089 */
1090 struct inode *dir = req->r_locked_dir;
1091 struct dentry *dn = req->r_dentry;
1092 bool have_dir_cap, have_lease;
1093
1094 BUG_ON(!dn);
1095 BUG_ON(!dir);
1096 BUG_ON(dn->d_parent->d_inode != dir);
1097 BUG_ON(ceph_ino(dir) !=
1098 le64_to_cpu(rinfo->diri.in->ino));
1099 BUG_ON(ceph_snap(dir) !=
1100 le64_to_cpu(rinfo->diri.in->snapid));
1101
355da1eb
SW
1102 /* do we have a lease on the whole dir? */
1103 have_dir_cap =
1104 (le32_to_cpu(rinfo->diri.in->cap.caps) &
1105 CEPH_CAP_FILE_SHARED);
1106
1107 /* do we have a dn lease? */
1108 have_lease = have_dir_cap ||
2f90b852 1109 le32_to_cpu(rinfo->dlease->duration_ms);
355da1eb
SW
1110 if (!have_lease)
1111 dout("fill_trace no dentry lease or dir cap\n");
1112
1113 /* rename? */
1114 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1115 dout(" src %p '%.*s' dst %p '%.*s'\n",
1116 req->r_old_dentry,
1117 req->r_old_dentry->d_name.len,
1118 req->r_old_dentry->d_name.name,
1119 dn, dn->d_name.len, dn->d_name.name);
1120 dout("fill_trace doing d_move %p -> %p\n",
1121 req->r_old_dentry, dn);
c10f5e12 1122
355da1eb
SW
1123 d_move(req->r_old_dentry, dn);
1124 dout(" src %p '%.*s' dst %p '%.*s'\n",
1125 req->r_old_dentry,
1126 req->r_old_dentry->d_name.len,
1127 req->r_old_dentry->d_name.name,
1128 dn, dn->d_name.len, dn->d_name.name);
81a6cf2d 1129
c4a29f26
SW
1130 /* ensure target dentry is invalidated, despite
1131 rehashing bug in vfs_rename_dir */
81a6cf2d
SW
1132 ceph_invalidate_dentry_lease(dn);
1133
09adc80c
SW
1134 /*
1135 * d_move() puts the renamed dentry at the end of
1136 * d_subdirs. We need to assign it an appropriate
1137 * directory offset so we can behave when dir is
1138 * complete.
1139 */
1140 ceph_set_dentry_offset(req->r_old_dentry);
99ccbd22 1141 dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1cd3935b 1142 ceph_dentry(req->r_old_dentry)->offset);
81a6cf2d 1143
355da1eb 1144 dn = req->r_old_dentry; /* use old_dentry */
355da1eb
SW
1145 }
1146
1147 /* null dentry? */
1148 if (!rinfo->head->is_target) {
1149 dout("fill_trace null dentry\n");
1150 if (dn->d_inode) {
1151 dout("d_delete %p\n", dn);
1152 d_delete(dn);
1153 } else {
1154 dout("d_instantiate %p NULL\n", dn);
1155 d_instantiate(dn, NULL);
1156 if (have_lease && d_unhashed(dn))
1157 d_rehash(dn);
1158 update_dentry_lease(dn, rinfo->dlease,
1159 session,
1160 req->r_request_started);
1161 }
1162 goto done;
1163 }
1164
1165 /* attach proper inode */
86b58d13
YZ
1166 if (!dn->d_inode) {
1167 ihold(in);
467c5251 1168 dn = splice_dentry(dn, in, &have_lease, true);
355da1eb
SW
1169 if (IS_ERR(dn)) {
1170 err = PTR_ERR(dn);
1171 goto done;
1172 }
1173 req->r_dentry = dn; /* may have spliced */
86b58d13 1174 } else if (dn->d_inode && dn->d_inode != in) {
355da1eb 1175 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1176 dn, dn->d_inode, ceph_vinop(dn->d_inode),
1177 ceph_vinop(in));
355da1eb 1178 have_lease = false;
355da1eb
SW
1179 }
1180
1181 if (have_lease)
1182 update_dentry_lease(dn, rinfo->dlease, session,
1183 req->r_request_started);
1184 dout(" final dn %p\n", dn);
86b58d13
YZ
1185 } else if (!req->r_aborted &&
1186 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1187 req->r_op == CEPH_MDS_OP_MKSNAP)) {
355da1eb
SW
1188 struct dentry *dn = req->r_dentry;
1189
1190 /* fill out a snapdir LOOKUPSNAP dentry */
1191 BUG_ON(!dn);
1192 BUG_ON(!req->r_locked_dir);
1193 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
1194 ininfo = rinfo->targeti.in;
1195 vino.ino = le64_to_cpu(ininfo->ino);
1196 vino.snap = le64_to_cpu(ininfo->snapid);
355da1eb 1197 dout(" linking snapped dir %p to dn %p\n", in, dn);
86b58d13 1198 ihold(in);
467c5251 1199 dn = splice_dentry(dn, in, NULL, true);
355da1eb
SW
1200 if (IS_ERR(dn)) {
1201 err = PTR_ERR(dn);
1202 goto done;
1203 }
1204 req->r_dentry = dn; /* may have spliced */
355da1eb 1205 }
355da1eb
SW
1206done:
1207 dout("fill_trace done err=%d\n", err);
1208 return err;
1209}
1210
1211/*
1212 * Prepopulate our cache with readdir results, leases, etc.
1213 */
79f9f99a
SW
1214static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1215 struct ceph_mds_session *session)
1216{
1217 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1218 int i, err = 0;
1219
1220 for (i = 0; i < rinfo->dir_nr; i++) {
1221 struct ceph_vino vino;
1222 struct inode *in;
1223 int rc;
1224
1225 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1226 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1227
1228 in = ceph_get_inode(req->r_dentry->d_sb, vino);
1229 if (IS_ERR(in)) {
1230 err = PTR_ERR(in);
1231 dout("new_inode badness got %d\n", err);
1232 continue;
1233 }
1234 rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
1235 req->r_request_started, -1,
1236 &req->r_caps_reservation);
1237 if (rc < 0) {
1238 pr_err("fill_inode badness on %p got %d\n", in, rc);
1239 err = rc;
1240 continue;
1241 }
1242 }
1243
1244 return err;
1245}
1246
355da1eb
SW
1247int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1248 struct ceph_mds_session *session)
1249{
1250 struct dentry *parent = req->r_dentry;
1251 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1252 struct qstr dname;
1253 struct dentry *dn;
1254 struct inode *in;
86b58d13 1255 int err = 0, ret, i;
355da1eb
SW
1256 struct inode *snapdir = NULL;
1257 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
355da1eb 1258 struct ceph_dentry_info *di;
81c6aea5
YZ
1259 u64 r_readdir_offset = req->r_readdir_offset;
1260 u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1261
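	/*
	 * The MDS may reply for a different dirfrag than the one we asked
	 * for (e.g. after a split); restart the readdir offset for that
	 * frag, reserving slots for "." and ".." only in the leftmost frag.
	 */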
1262 if (rinfo->dir_dir &&
1263 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1264 dout("readdir_prepopulate got new frag %x -> %x\n",
1265 frag, le32_to_cpu(rinfo->dir_dir->frag));
1266 frag = le32_to_cpu(rinfo->dir_dir->frag);
1267 if (ceph_frag_is_leftmost(frag))
1268 r_readdir_offset = 2;
1269 else
1270 r_readdir_offset = 0;
1271 }
355da1eb 1272
79f9f99a
SW
1273 if (req->r_aborted)
1274 return readdir_prepopulate_inodes_only(req, session);
355da1eb
SW
1275
1276 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1277 snapdir = ceph_get_snapdir(parent->d_inode);
1278 parent = d_find_alias(snapdir);
1279 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1280 rinfo->dir_nr, parent);
1281 } else {
1282 dout("readdir_prepopulate %d items under dn %p\n",
1283 rinfo->dir_nr, parent);
1284 if (rinfo->dir_dir)
1285 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1286 }
1287
86b58d13 1288 /* FIXME: release caps/leases if error occurs */
355da1eb
SW
1289 for (i = 0; i < rinfo->dir_nr; i++) {
1290 struct ceph_vino vino;
1291
1292 dname.name = rinfo->dir_dname[i];
1293 dname.len = rinfo->dir_dname_len[i];
1294 dname.hash = full_name_hash(dname.name, dname.len);
1295
1296 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1297 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1298
1299retry_lookup:
1300 dn = d_lookup(parent, &dname);
1301 dout("d_lookup on parent=%p name=%.*s got %p\n",
1302 parent, dname.len, dname.name, dn);
1303
1304 if (!dn) {
1305 dn = d_alloc(parent, &dname);
1306 dout("d_alloc %p '%.*s' = %p\n", parent,
1307 dname.len, dname.name, dn);
1308 if (dn == NULL) {
1309 dout("d_alloc badness\n");
1310 err = -ENOMEM;
1311 goto out;
1312 }
86b58d13
YZ
1313 ret = ceph_init_dentry(dn);
1314 if (ret < 0) {
8c696737 1315 dput(dn);
86b58d13 1316 err = ret;
355da1eb 1317 goto out;
8c696737 1318 }
355da1eb
SW
1319 } else if (dn->d_inode &&
1320 (ceph_ino(dn->d_inode) != vino.ino ||
1321 ceph_snap(dn->d_inode) != vino.snap)) {
1322 dout(" dn %p points to wrong inode %p\n",
1323 dn, dn->d_inode);
1324 d_delete(dn);
1325 dput(dn);
1326 goto retry_lookup;
1327 } else {
1328 /* reorder parent's d_subdirs */
2fd6b7f5
NP
1329 spin_lock(&parent->d_lock);
1330 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
355da1eb
SW
1331 list_move(&dn->d_u.d_child, &parent->d_subdirs);
1332 spin_unlock(&dn->d_lock);
2fd6b7f5 1333 spin_unlock(&parent->d_lock);
355da1eb
SW
1334 }
1335
355da1eb
SW
1336 /* inode */
1337 if (dn->d_inode) {
1338 in = dn->d_inode;
1339 } else {
1340 in = ceph_get_inode(parent->d_sb, vino);
ac1f12ef 1341 if (IS_ERR(in)) {
355da1eb 1342 dout("new_inode badness\n");
2744c171 1343 d_drop(dn);
355da1eb 1344 dput(dn);
ac1f12ef 1345 err = PTR_ERR(in);
355da1eb
SW
1346 goto out;
1347 }
355da1eb
SW
1348 }
1349
1350 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1351 req->r_request_started, -1,
1352 &req->r_caps_reservation) < 0) {
1353 pr_err("fill_inode badness on %p\n", in);
86b58d13
YZ
1354 if (!dn->d_inode)
1355 iput(in);
1356 d_drop(dn);
d69ed05a 1357 goto next_item;
355da1eb 1358 }
86b58d13
YZ
1359
1360 if (!dn->d_inode) {
1361 dn = splice_dentry(dn, in, NULL, false);
1362 if (IS_ERR(dn)) {
1363 err = PTR_ERR(dn);
1364 dn = NULL;
1365 goto next_item;
1366 }
1367 }
1368
1369 di = dn->d_fsdata;
1370 di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
1371
1372 update_dentry_lease(dn, rinfo->dir_dlease[i],
1373 req->r_session,
1374 req->r_request_started);
d69ed05a
SW
1375next_item:
1376 if (dn)
1377 dput(dn);
355da1eb 1378 }
86b58d13
YZ
1379 if (err == 0)
1380 req->r_did_prepopulate = true;
355da1eb
SW
1381
1382out:
1383 if (snapdir) {
1384 iput(snapdir);
1385 dput(parent);
1386 }
1387 dout("readdir_prepopulate done\n");
1388 return err;
1389}
1390
1391int ceph_inode_set_size(struct inode *inode, loff_t size)
1392{
1393 struct ceph_inode_info *ci = ceph_inode(inode);
1394 int ret = 0;
1395
be655596 1396 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1397 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1398 inode->i_size = size;
1399 inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1400
1401 /* tell the MDS if we are approaching max_size */
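	/* i.e. once i_size passes half of max_size and we have not yet reported past that point */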
1402 if ((size << 1) >= ci->i_max_size &&
1403 (ci->i_reported_size << 1) < ci->i_max_size)
1404 ret = 1;
1405
be655596 1406 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1407 return ret;
1408}
1409
1410/*
1411 * Write back inode data in a worker thread. (This can't be done
1412 * in the message handler context.)
1413 */
3c6f6b79
SW
1414void ceph_queue_writeback(struct inode *inode)
1415{
15a2015f 1416 ihold(inode);
3c6f6b79
SW
1417 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1418 &ceph_inode(inode)->i_wb_work)) {
2c27c9a5 1419 dout("ceph_queue_writeback %p\n", inode);
3c6f6b79 1420 } else {
2c27c9a5 1421 dout("ceph_queue_writeback %p failed\n", inode);
15a2015f 1422 iput(inode);
3c6f6b79
SW
1423 }
1424}
1425
1426static void ceph_writeback_work(struct work_struct *work)
1427{
1428 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1429 i_wb_work);
1430 struct inode *inode = &ci->vfs_inode;
1431
1432 dout("writeback %p\n", inode);
1433 filemap_fdatawrite(&inode->i_data);
1434 iput(inode);
1435}
1436
3c6f6b79
SW
1437/*
1438 * queue an async invalidation
1439 */
1440void ceph_queue_invalidate(struct inode *inode)
1441{
15a2015f 1442 ihold(inode);
3c6f6b79
SW
1443 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1444 &ceph_inode(inode)->i_pg_inv_work)) {
1445 dout("ceph_queue_invalidate %p\n", inode);
3c6f6b79
SW
1446 } else {
1447 dout("ceph_queue_invalidate %p failed\n", inode);
15a2015f 1448 iput(inode);
3c6f6b79
SW
1449 }
1450}
1451
355da1eb
SW
1452/*
1453 * Invalidate inode pages in a worker thread. (This can't be done
1454 * in the message handler context.)
1455 */
3c6f6b79 1456static void ceph_invalidate_work(struct work_struct *work)
355da1eb
SW
1457{
1458 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1459 i_pg_inv_work);
1460 struct inode *inode = &ci->vfs_inode;
1461 u32 orig_gen;
1462 int check = 0;
1463
b0d7c223 1464 mutex_lock(&ci->i_truncate_mutex);
be655596 1465 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1466 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1467 ci->i_rdcache_gen, ci->i_rdcache_revoking);
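	/* a gen mismatch means this queued invalidation is out of date (e.g. caps were re-issued); bail */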
cd045cb4 1468 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
9563f88c
YZ
1469 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1470 check = 1;
be655596 1471 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1472 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1473 goto out;
1474 }
1475 orig_gen = ci->i_rdcache_gen;
be655596 1476 spin_unlock(&ci->i_ceph_lock);
355da1eb 1477
b0d7c223 1478 truncate_inode_pages(inode->i_mapping, 0);
355da1eb 1479
be655596 1480 spin_lock(&ci->i_ceph_lock);
cd045cb4
SW
1481 if (orig_gen == ci->i_rdcache_gen &&
1482 orig_gen == ci->i_rdcache_revoking) {
355da1eb
SW
1483 dout("invalidate_pages %p gen %d successful\n", inode,
1484 ci->i_rdcache_gen);
cd045cb4 1485 ci->i_rdcache_revoking--;
355da1eb
SW
1486 check = 1;
1487 } else {
cd045cb4
SW
1488 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1489 inode, orig_gen, ci->i_rdcache_gen,
1490 ci->i_rdcache_revoking);
9563f88c
YZ
1491 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1492 check = 1;
355da1eb 1493 }
be655596 1494 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1495 mutex_unlock(&ci->i_truncate_mutex);
9563f88c 1496out:
355da1eb
SW
1497 if (check)
1498 ceph_check_caps(ci, 0, NULL);
355da1eb
SW
1499 iput(inode);
1500}
1501
1502
1503/*
3f99969f 1504 * called by trunc_wq;
1505 *
 1506 * We also truncate in a separate thread.
1507 */
3c6f6b79 1508static void ceph_vmtruncate_work(struct work_struct *work)
355da1eb
SW
1509{
1510 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1511 i_vmtruncate_work);
1512 struct inode *inode = &ci->vfs_inode;
1513
1514 dout("vmtruncate_work %p\n", inode);
b415bf4f 1515 __ceph_do_pending_vmtruncate(inode);
355da1eb
SW
1516 iput(inode);
1517}
1518
3c6f6b79
SW
1519/*
1520 * Queue an async vmtruncate. If we fail to queue work, we will handle
1521 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1522 */
1523void ceph_queue_vmtruncate(struct inode *inode)
1524{
1525 struct ceph_inode_info *ci = ceph_inode(inode);
1526
15a2015f 1527 ihold(inode);
99ccbd22 1528
640ef79d 1529 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1530 &ci->i_vmtruncate_work)) {
1531 dout("ceph_queue_vmtruncate %p\n", inode);
3c6f6b79
SW
1532 } else {
1533 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1534 inode, ci->i_truncate_pending);
15a2015f 1535 iput(inode);
3c6f6b79
SW
1536 }
1537}
1538
355da1eb 1539/*
1540 * Make sure any pending truncation is applied before doing anything
1541 * that may depend on it.
1542 */
b415bf4f 1543void __ceph_do_pending_vmtruncate(struct inode *inode)
355da1eb
SW
1544{
1545 struct ceph_inode_info *ci = ceph_inode(inode);
1546 u64 to;
a85f50b6 1547 int wrbuffer_refs, finish = 0;
355da1eb 1548
b0d7c223 1549 mutex_lock(&ci->i_truncate_mutex);
355da1eb 1550retry:
be655596 1551 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1552 if (ci->i_truncate_pending == 0) {
1553 dout("__do_pending_vmtruncate %p none pending\n", inode);
be655596 1554 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1555 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1556 return;
1557 }
1558
1559 /*
1560 * make sure any dirty snapped pages are flushed before we
1561 * possibly truncate them.. so write AND block!
1562 */
1563 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1564 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1565 inode);
be655596 1566 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1567 filemap_write_and_wait_range(&inode->i_data, 0,
1568 inode->i_sb->s_maxbytes);
1569 goto retry;
1570 }
1571
b0d7c223
YZ
1572 /* there should be no reader or writer */
1573 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1574
355da1eb
SW
1575 to = ci->i_truncate_size;
1576 wrbuffer_refs = ci->i_wrbuffer_ref;
1577 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1578 ci->i_truncate_pending, to);
be655596 1579 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1580
1581 truncate_inode_pages(inode->i_mapping, to);
1582
be655596 1583 spin_lock(&ci->i_ceph_lock);
a85f50b6
YZ
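	/* if i_truncate_size changed while the lock was dropped, go around again */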
1584 if (to == ci->i_truncate_size) {
1585 ci->i_truncate_pending = 0;
1586 finish = 1;
1587 }
be655596 1588 spin_unlock(&ci->i_ceph_lock);
a85f50b6
YZ
1589 if (!finish)
1590 goto retry;
355da1eb 1591
b0d7c223
YZ
1592 mutex_unlock(&ci->i_truncate_mutex);
1593
355da1eb
SW
1594 if (wrbuffer_refs == 0)
1595 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
a85f50b6
YZ
1596
1597 wake_up_all(&ci->i_cap_wq);
355da1eb
SW
1598}
1599
355da1eb
SW
1600/*
1601 * symlinks
1602 */
1603static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1604{
1605 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1606 nd_set_link(nd, ci->i_symlink);
1607 return NULL;
1608}
1609
1610static const struct inode_operations ceph_symlink_iops = {
1611 .readlink = generic_readlink,
1612 .follow_link = ceph_sym_follow_link,
1613 .setattr = ceph_setattr,
1614 .getattr = ceph_getattr,
1615 .setxattr = ceph_setxattr,
1616 .getxattr = ceph_getxattr,
1617 .listxattr = ceph_listxattr,
1618 .removexattr = ceph_removexattr,
7221fe4c 1619 .get_acl = ceph_get_acl,
72466d0b 1620 .set_acl = ceph_set_acl,
1621};
1622
1623/*
1624 * setattr
1625 */
1626int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1627{
1628 struct inode *inode = dentry->d_inode;
1629 struct ceph_inode_info *ci = ceph_inode(inode);
355da1eb
SW
1630 const unsigned int ia_valid = attr->ia_valid;
1631 struct ceph_mds_request *req;
3d14c5d2 1632 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
355da1eb
SW
1633 int issued;
1634 int release = 0, dirtied = 0;
1635 int mask = 0;
1636 int err = 0;
fca65b4a 1637 int inode_dirty_flags = 0;
355da1eb
SW
1638
1639 if (ceph_snap(inode) != CEPH_NOSNAP)
1640 return -EROFS;
1641
355da1eb
SW
1642 err = inode_change_ok(inode, attr);
1643 if (err != 0)
1644 return err;
1645
1646 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1647 USE_AUTH_MDS);
1648 if (IS_ERR(req))
1649 return PTR_ERR(req);
1650
be655596 1651 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1652 issued = __ceph_caps_issued(ci, NULL);
1653 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1654
1655 if (ia_valid & ATTR_UID) {
1656 dout("setattr %p uid %d -> %d\n", inode,
1657 from_kuid(&init_user_ns, inode->i_uid),
1658 from_kuid(&init_user_ns, attr->ia_uid));
355da1eb
SW
1659 if (issued & CEPH_CAP_AUTH_EXCL) {
1660 inode->i_uid = attr->ia_uid;
1661 dirtied |= CEPH_CAP_AUTH_EXCL;
1662 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1663 !uid_eq(attr->ia_uid, inode->i_uid)) {
1664 req->r_args.setattr.uid = cpu_to_le32(
1665 from_kuid(&init_user_ns, attr->ia_uid));
355da1eb
SW
1666 mask |= CEPH_SETATTR_UID;
1667 release |= CEPH_CAP_AUTH_SHARED;
1668 }
1669 }
1670 if (ia_valid & ATTR_GID) {
1671 dout("setattr %p gid %d -> %d\n", inode,
1672 from_kgid(&init_user_ns, inode->i_gid),
1673 from_kgid(&init_user_ns, attr->ia_gid));
355da1eb
SW
1674 if (issued & CEPH_CAP_AUTH_EXCL) {
1675 inode->i_gid = attr->ia_gid;
1676 dirtied |= CEPH_CAP_AUTH_EXCL;
1677 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1678 !gid_eq(attr->ia_gid, inode->i_gid)) {
1679 req->r_args.setattr.gid = cpu_to_le32(
1680 from_kgid(&init_user_ns, attr->ia_gid));
355da1eb
SW
1681 mask |= CEPH_SETATTR_GID;
1682 release |= CEPH_CAP_AUTH_SHARED;
1683 }
1684 }
1685 if (ia_valid & ATTR_MODE) {
1686 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1687 attr->ia_mode);
1688 if (issued & CEPH_CAP_AUTH_EXCL) {
1689 inode->i_mode = attr->ia_mode;
1690 dirtied |= CEPH_CAP_AUTH_EXCL;
1691 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1692 attr->ia_mode != inode->i_mode) {
7221fe4c 1693 inode->i_mode = attr->ia_mode;
355da1eb
SW
1694 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1695 mask |= CEPH_SETATTR_MODE;
1696 release |= CEPH_CAP_AUTH_SHARED;
1697 }
1698 }
1699
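	/*
	 * The timestamp updates below share a pattern: with FILE_EXCL we
	 * apply the change locally and mark the cap dirty; with only FILE_WR
	 * we may still move the time forward locally; otherwise the new
	 * value is sent to the MDS in the setattr request.
	 */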
1700 if (ia_valid & ATTR_ATIME) {
1701 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1702 inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1703 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1704 if (issued & CEPH_CAP_FILE_EXCL) {
1705 ci->i_time_warp_seq++;
1706 inode->i_atime = attr->ia_atime;
1707 dirtied |= CEPH_CAP_FILE_EXCL;
1708 } else if ((issued & CEPH_CAP_FILE_WR) &&
1709 timespec_compare(&inode->i_atime,
1710 &attr->ia_atime) < 0) {
1711 inode->i_atime = attr->ia_atime;
1712 dirtied |= CEPH_CAP_FILE_WR;
1713 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1714 !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1715 ceph_encode_timespec(&req->r_args.setattr.atime,
1716 &attr->ia_atime);
1717 mask |= CEPH_SETATTR_ATIME;
1718 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1719 CEPH_CAP_FILE_WR;
1720 }
1721 }
1722 if (ia_valid & ATTR_MTIME) {
1723 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1724 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1725 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1726 if (issued & CEPH_CAP_FILE_EXCL) {
1727 ci->i_time_warp_seq++;
1728 inode->i_mtime = attr->ia_mtime;
1729 dirtied |= CEPH_CAP_FILE_EXCL;
1730 } else if ((issued & CEPH_CAP_FILE_WR) &&
1731 timespec_compare(&inode->i_mtime,
1732 &attr->ia_mtime) < 0) {
1733 inode->i_mtime = attr->ia_mtime;
1734 dirtied |= CEPH_CAP_FILE_WR;
1735 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1736 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1737 ceph_encode_timespec(&req->r_args.setattr.mtime,
1738 &attr->ia_mtime);
1739 mask |= CEPH_SETATTR_MTIME;
1740 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1741 CEPH_CAP_FILE_WR;
1742 }
1743 }
1744 if (ia_valid & ATTR_SIZE) {
1745 dout("setattr %p size %lld -> %lld\n", inode,
1746 inode->i_size, attr->ia_size);
1747 if (attr->ia_size > inode->i_sb->s_maxbytes) {
1748 err = -EINVAL;
1749 goto out;
1750 }
1751 if ((issued & CEPH_CAP_FILE_EXCL) &&
1752 attr->ia_size > inode->i_size) {
1753 inode->i_size = attr->ia_size;
355da1eb
SW
1754 inode->i_blocks =
1755 (attr->ia_size + (1 << 9) - 1) >> 9;
1756 inode->i_ctime = attr->ia_ctime;
1757 ci->i_reported_size = attr->ia_size;
1758 dirtied |= CEPH_CAP_FILE_EXCL;
1759 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1760 attr->ia_size != inode->i_size) {
1761 req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1762 req->r_args.setattr.old_size =
1763 cpu_to_le64(inode->i_size);
1764 mask |= CEPH_SETATTR_SIZE;
1765 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1766 CEPH_CAP_FILE_WR;
1767 }
1768 }
1769
1770 /* these do nothing */
1771 if (ia_valid & ATTR_CTIME) {
1772 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1773 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1774 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1775 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1776 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1777 only ? "ctime only" : "ignored");
1778 inode->i_ctime = attr->ia_ctime;
1779 if (only) {
1780 /*
1781 * if kernel wants to dirty ctime but nothing else,
1782 * we need to choose a cap to dirty under, or do
 1783 * an almost-no-op setattr
1784 */
1785 if (issued & CEPH_CAP_AUTH_EXCL)
1786 dirtied |= CEPH_CAP_AUTH_EXCL;
1787 else if (issued & CEPH_CAP_FILE_EXCL)
1788 dirtied |= CEPH_CAP_FILE_EXCL;
1789 else if (issued & CEPH_CAP_XATTR_EXCL)
1790 dirtied |= CEPH_CAP_XATTR_EXCL;
1791 else
1792 mask |= CEPH_SETATTR_CTIME;
1793 }
1794 }
1795 if (ia_valid & ATTR_FILE)
1796 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1797
1798 if (dirtied) {
fca65b4a 1799 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
355da1eb
SW
1800 inode->i_ctime = CURRENT_TIME;
1801 }
1802
1803 release &= issued;
be655596 1804 spin_unlock(&ci->i_ceph_lock);
355da1eb 1805
fca65b4a
SW
1806 if (inode_dirty_flags)
1807 __mark_inode_dirty(inode, inode_dirty_flags);
1808
7221fe4c 1809 if (ia_valid & ATTR_MODE) {
4db658ea 1810 err = posix_acl_chmod(inode, attr->ia_mode);
7221fe4c
GZ
1811 if (err)
1812 goto out_put;
1813 }
1814
355da1eb 1815 if (mask) {
70b666c3
SW
1816 req->r_inode = inode;
1817 ihold(inode);
355da1eb
SW
1818 req->r_inode_drop = release;
1819 req->r_args.setattr.mask = cpu_to_le32(mask);
1820 req->r_num_caps = 1;
752c8bdc 1821 err = ceph_mdsc_do_request(mdsc, NULL, req);
355da1eb
SW
1822 }
1823 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1824 ceph_cap_string(dirtied), mask);
1825
1826 ceph_mdsc_put_request(req);
b0d7c223
YZ
1827 if (mask & CEPH_SETATTR_SIZE)
1828 __ceph_do_pending_vmtruncate(inode);
355da1eb
SW
1829 return err;
1830out:
be655596 1831 spin_unlock(&ci->i_ceph_lock);
7221fe4c 1832out_put:
355da1eb
SW
1833 ceph_mdsc_put_request(req);
1834 return err;
1835}
1836
1837/*
1838 * Verify that we have a lease on the given mask. If not,
1839 * do a getattr against an mds.
1840 */
1841int ceph_do_getattr(struct inode *inode, int mask)
1842{
3d14c5d2
YS
1843 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1844 struct ceph_mds_client *mdsc = fsc->mdsc;
355da1eb
SW
1845 struct ceph_mds_request *req;
1846 int err;
1847
1848 if (ceph_snap(inode) == CEPH_SNAPDIR) {
1849 dout("do_getattr inode %p SNAPDIR\n", inode);
1850 return 0;
1851 }
1852
b7495fc2 1853 dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode);
355da1eb
SW
1854 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1855 return 0;
1856
1857 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1858 if (IS_ERR(req))
1859 return PTR_ERR(req);
70b666c3
SW
1860 req->r_inode = inode;
1861 ihold(inode);
355da1eb
SW
1862 req->r_num_caps = 1;
1863 req->r_args.getattr.mask = cpu_to_le32(mask);
1864 err = ceph_mdsc_do_request(mdsc, NULL, req);
1865 ceph_mdsc_put_request(req);
1866 dout("do_getattr result=%d\n", err);
1867 return err;
1868}
1869
1870
1871/*
1872 * Check inode permissions. We verify we have a valid value for
1873 * the AUTH cap, then call the generic handler.
1874 */
10556cb2 1875int ceph_permission(struct inode *inode, int mask)
355da1eb 1876{
b74c79e9
NP
1877 int err;
1878
10556cb2 1879 if (mask & MAY_NOT_BLOCK)
b74c79e9
NP
1880 return -ECHILD;
1881
1882 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
355da1eb
SW
1883
1884 if (!err)
2830ba7f 1885 err = generic_permission(inode, mask);
355da1eb
SW
1886 return err;
1887}
1888
1889/*
 1890 * Get all attributes. Hopefully someday we'll have a statlite()
1891 * and can limit the fields we require to be accurate.
1892 */
1893int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1894 struct kstat *stat)
1895{
1896 struct inode *inode = dentry->d_inode;
232d4b01 1897 struct ceph_inode_info *ci = ceph_inode(inode);
355da1eb
SW
1898 int err;
1899
1900 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1901 if (!err) {
1902 generic_fillattr(inode, stat);
ad1fee96 1903 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
355da1eb
SW
1904 if (ceph_snap(inode) != CEPH_NOSNAP)
1905 stat->dev = ceph_snap(inode);
1906 else
1907 stat->dev = 0;
232d4b01 1908 if (S_ISDIR(inode->i_mode)) {
1c1266bb
YS
1909 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
1910 RBYTES))
1911 stat->size = ci->i_rbytes;
1912 else
1913 stat->size = ci->i_files + ci->i_subdirs;
232d4b01 1914 stat->blocks = 0;
355da1eb 1915 stat->blksize = 65536;
232d4b01 1916 }
355da1eb
SW
1917 }
1918 return err;
1919}