#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */


/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}

const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
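
/*
 * Worked example of the encoding above: a cap word of CEPH_CAP_PIN |
 * CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD renders as "pFsr" -- 'p'
 * for the pin, 'F' introducing the FILE field, then 's' and 'r' from
 * gcap_string(); an empty word prints as "-".  Note the result lives
 * in a small rotating pool of MAX_CAP_STR static buffers, so it is
 * only valid until MAX_CAP_STR subsequent calls reuse the slot.
 */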

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}

int ceph_reserve_caps(struct ceph_mds_client *mdsc,
		      struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);
	int ret = 0;

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap) {
			ret = -ENOMEM;
			goto out_alloc_count;
		}
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	BUG_ON(have + alloc != need);

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	return 0;

out_alloc_count:
	/* we didn't manage to reserve as much as we needed */
	pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
		   ctx, need, have);
	return ret;
}
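
/*
 * The reservation pool maintains the invariant (asserted by the
 * BUG_ONs throughout this file, always under caps_list_lock):
 *
 *   caps_total_count == caps_use_count + caps_reserve_count +
 *                       caps_avail_count
 *
 * ceph_reserve_caps() moves caps from avail to reserve (allocating
 * more as needed), get_cap() below moves one from reserve to use,
 * and ceph_put_cap() returns it to avail or frees it.
 */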

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}

static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
				struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;
	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode));
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}

/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
		ci->i_rdcache_gen++;

	/*
	 * if we are newly issued FILE_SHARED, mark dir not complete; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
		 struct ceph_mds_session *session, u64 cap_id,
		 int fmode, unsigned issued, unsigned wanted,
		 unsigned seq, unsigned mseq, u64 realmino, int flags,
		 struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *new_cap = NULL;
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (new_cap) {
			cap = new_cap;
			new_cap = NULL;
		} else {
			spin_unlock(&ci->i_ceph_lock);
			new_cap = get_cap(mdsc, caps_reservation);
			if (new_cap == NULL)
				return -ENOMEM;
			goto retry;
		}

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* clear out old exporting info?  (i.e. on cap import) */
		if (ci->i_cap_exporting_mds == mds) {
			ci->i_cap_exporting_issued = 0;
			ci->i_cap_exporting_mseq = 0;
			ci->i_cap_exporting_mds = -1;
		}

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else if (new_cap)
		ceph_put_cap(mdsc, new_cap);

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			ceph_get_snap_realm(mdsc, realm);
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		if (ci->i_auth_cap == NULL ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0)
			ci->i_auth_cap = cap;
	} else if (ci->i_auth_cap == cap) {
		ci->i_auth_cap = NULL;
		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&ci->i_dirty_item)) {
			dout(" moving %p to cap_dirty_migrating\n", inode);
			list_move(&ci->i_dirty_item,
				  &mdsc->cap_dirty_migrating);
		}
		spin_unlock(&mdsc->cap_dirty_lock);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (mseq > cap->mseq)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
	spin_unlock(&ci->i_ceph_lock);
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
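
/*
 * Note the retry dance in ceph_add_cap() above: get_cap() may sleep
 * in kmem_cache_alloc(), so the function drops i_ceph_lock,
 * allocates new_cap, and retries the lookup; if another thread raced
 * in and attached a cap for this mds meanwhile, the preallocated cap
 * is simply returned via ceph_put_cap().
 */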

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
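
/*
 * Example of the 'combo' path above: a request for
 * CEPH_CAP_FILE_SHARED|CEPH_CAP_FILE_RD can be satisfied by Fs held
 * from one mds plus Fr from another; in that case, with touch set,
 * the matching cap and every valid cap preceding it in the rb-tree
 * are bumped in their sessions' LRUs.
 */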

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct rb_node *p;
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (__cap_is_valid(cap) &&
		    (cap->implemented & ~cap->issued & mask)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int want = 0;
	int mode;
	for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
		if (ci->i_nr_by_mode[mode])
			want |= ceph_caps_for_mode(mode);
	return want;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		mds_wanted |= cap->mds_wanted;
	}
	return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;
	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	if (removed)
		ceph_put_cap(mdsc, cap);

	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
		struct ceph_snap_realm *realm = ci->i_snap_realm;
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		ci->i_snap_realm_counter++;
		ci->i_snap_realm = NULL;
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}
	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
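
/*
 * If the session's cap iterator is currently parked on this cap,
 * unlinking from s_caps is left to the iterator itself; we still
 * clear cap->ci under s_cap_lock so iterate_session_caps() can tell
 * the cap is defunct, and the final ceph_put_cap() is deferred along
 * with the list removal.
 */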

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
			u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			u64 time_warp_seq,
			kuid_t uid, kgid_t gid, umode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.tid = cpu_to_le64(flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
	fc->mode = cpu_to_le32(mode);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}

void __queue_cap_release(struct ceph_mds_session *session,
			 u64 ino, u64 cap_id, u32 migrate_seq,
			 u32 issue_seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;

	spin_lock(&session->s_cap_lock);
	BUG_ON(!session->s_num_cap_releases);
	msg = list_first_entry(&session->s_cap_releases,
			       struct ceph_msg, list_head);

	dout(" adding %llx release to mds%d msg %p (%d left)\n",
	     ino, session->s_mds, msg, session->s_num_cap_releases);

	BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
	head = msg->front.iov_base;
	le32_add_cpu(&head->num, 1);
	item = msg->front.iov_base + msg->front.iov_len;
	item->ino = cpu_to_le64(ino);
	item->cap_id = cpu_to_le64(cap_id);
	item->migrate_seq = cpu_to_le32(migrate_seq);
	item->seq = cpu_to_le32(issue_seq);

	session->s_num_cap_releases--;

	msg->front.iov_len += sizeof(*item);
	if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
		dout(" release msg %p full\n", msg);
		list_move_tail(&msg->list_head, &session->s_cap_releases_done);
	} else {
		dout(" release msg %p at %d/%d (%d)\n", msg,
		     (int)le32_to_cpu(head->num),
		     (int)CEPH_CAPS_PER_RELEASE,
		     (int)msg->front.iov_len);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		struct ceph_mds_session *session = cap->session;

		__queue_cap_release(session, ceph_ino(inode), cap->cap_id,
				    cap->mseq, cap->issue_seq);
		p = rb_next(p);
		__ceph_remove_cap(cap);
	}
}

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      unsigned *pflush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 seq, issue_seq, mseq, time_warp_seq, follows;
	u64 size, max_size;
	struct timespec mtime, atime;
	int wake = 0;
	umode_t mode;
	kuid_t uid;
	kgid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	struct ceph_buffer *xattr_blob = NULL;
	int delayed = 0;
	u64 flush_tid = 0;
	int i;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	if (flushing) {
		/*
		 * assign a tid for flush operations so we can avoid
		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
		 * clean type races.  track latest tid for every bit
		 * so we can handle flush AxFw, flush Fw, and have the
		 * first ack clean Ax.
		 */
		flush_tid = ++ci->i_cap_flush_last_tid;
		if (pflush_tid)
			*pflush_tid = flush_tid;
		dout(" cap_flush_tid %d\n", (int)flush_tid);
		for (i = 0; i < CEPH_CAP_BITS; i++)
			if (flushing & (1 << i))
				ci->i_cap_flush_tid[i] = flush_tid;

		follows = ci->i_head_snapc->seq;
	} else {
		follows = 0;
	}

	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	time_warp_seq = ci->i_time_warp_seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_blob = ci->i_xattrs.blob;
		xattr_version = ci->i_xattrs.version;
	}

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, time_warp_seq,
		uid, gid, mode, xattr_version, xattr_blob,
		follows);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}
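
/*
 * Worked example of the per-bit flush tids assigned above: flushing
 * Ax|Fw stamps both bits with tid N; if Fw is dirtied and flushed
 * again as tid N+1, the ack for tid N can clean Ax but must leave Fw
 * dirty, since Fw's latest tid is now N+1.
 */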

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Unless @again is true, skip cap_snaps that were already sent to
 * the MDS (i.e., during this session).
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
			struct ceph_mds_session **psession,
			int again)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	int mds;
	struct ceph_cap_snap *capsnap;
	u32 mseq;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
						    session->s_mutex */
	u64 next_follows = 0;  /* keep track of how far we've gotten through
				  the i_cap_snaps list, and skip these entries
				  next time around to avoid an infinite loop */

	if (psession)
		session = *psession;

	dout("__flush_snaps %p\n", inode);
retry:
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/* avoid an infinite loop after retry */
		if (capsnap->follows < next_follows)
			continue;
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/*
		 * if cap writeback already occurred, we should have dropped
		 * the capsnap in ceph_put_wrbuffer_cap_refs.
		 */
		BUG_ON(capsnap->dirty == 0);

		/* pick mds, take s_mutex */
		if (ci->i_auth_cap == NULL) {
			dout("no auth cap (migrating?), doing nothing\n");
			goto out;
		}

		/* only flush each capsnap once */
		if (!again && !list_empty(&capsnap->flushing_item)) {
			dout("already flushed %p, skipping\n", capsnap);
			continue;
		}

		mds = ci->i_auth_cap->session->s_mds;
		mseq = ci->i_auth_cap->mseq;

		if (session && session->s_mds != mds) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			session = NULL;
		}
		if (!session) {
			spin_unlock(&ci->i_ceph_lock);
			mutex_lock(&mdsc->mutex);
			session = __ceph_lookup_mds_session(mdsc, mds);
			mutex_unlock(&mdsc->mutex);
			if (session) {
				dout("inverting session/ino locks on %p\n",
				     session);
				mutex_lock(&session->s_mutex);
			}
			/*
			 * if session == NULL, we raced against a cap
			 * deletion or migration.  retry, and we'll
			 * get a better @mds value next time.
			 */
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}

		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
		atomic_inc(&capsnap->nref);
		if (!list_empty(&capsnap->flushing_item))
			list_del_init(&capsnap->flushing_item);
		list_add_tail(&capsnap->flushing_item,
			      &session->s_cap_snaps_flushing);
		spin_unlock(&ci->i_ceph_lock);

		dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
		     inode, capsnap, capsnap->follows, capsnap->flush_tid);
		send_cap_msg(session, ceph_vino(inode).ino, 0,
			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
			     capsnap->size, 0,
			     &capsnap->mtime, &capsnap->atime,
			     capsnap->time_warp_seq,
			     capsnap->uid, capsnap->gid, capsnap->mode,
			     capsnap->xattr_version, capsnap->xattr_blob,
			     capsnap->follows);

		next_follows = capsnap->follows + 1;
		ceph_put_cap_snap(capsnap);

		spin_lock(&ci->i_ceph_lock);
		goto retry;
	}

	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);

out:
	if (psession)
		*psession = session;
	else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
}

static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
	spin_lock(&ci->i_ceph_lock);
	__ceph_flush_snaps(ci, NULL, 0);
	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		if (!ci->i_head_snapc)
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		if (ci->i_auth_cap)
			list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		else
			list_add(&ci->i_dirty_item,
				 &mdsc->cap_dirty_migrating);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
		dout(" inode %p now flushing seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	} else {
		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		dout(" inode %p now flushing (more) seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	return flushing;
}
5ecad6fd
SW
1437/*
1438 * try to invalidate mapping pages without blocking.
1439 */
5ecad6fd
SW
1440static int try_nonblocking_invalidate(struct inode *inode)
1441{
1442 struct ceph_inode_info *ci = ceph_inode(inode);
1443 u32 invalidating_gen = ci->i_rdcache_gen;
1444
be655596 1445 spin_unlock(&ci->i_ceph_lock);
5ecad6fd 1446 invalidate_mapping_pages(&inode->i_data, 0, -1);
be655596 1447 spin_lock(&ci->i_ceph_lock);
5ecad6fd 1448
18a38193 1449 if (inode->i_data.nrpages == 0 &&
5ecad6fd
SW
1450 invalidating_gen == ci->i_rdcache_gen) {
1451 /* success. */
1452 dout("try_nonblocking_invalidate %p success\n", inode);
cd045cb4
SW
1453 /* save any racing async invalidate some trouble */
1454 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
5ecad6fd
SW
1455 return 0;
1456 }
1457 dout("try_nonblocking_invalidate %p failed\n", inode);
1458 return -1;
1459}
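
/*
 * The i_rdcache_gen comparison above catches a race: if FILE_CACHE
 * was re-issued while i_ceph_lock was dropped (__check_cap_issue
 * bumps i_rdcache_gen), any newly cached pages are legitimate and
 * the invalidate must not be reported as a success.
 */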

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;     /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int tried_invalidate = 0;
	int delayed = 0, sent = 0, force_requeue = 0, num;
	int queue_invalidate = 0;
	int is_delayed = flags & CHECK_CAPS_NODELAY;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	/* flush snaps first time around only */
	if (!list_empty(&ci->i_cap_snaps))
		__ceph_flush_snaps(ci, &session, 0);
	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	want = file_wanted | used;
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	retain = want | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (want) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    ci->i_wrbuffer_ref == 0 &&          /* no dirty pages... */
	    inode->i_data.nrpages &&            /* have cached pages */
	    (file_wanted == 0 ||                /* no open files */
	     (revoking & (CEPH_CAP_FILE_CACHE|
			  CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = 1;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = 1;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap->issued),
		     ceph_cap_string(cap_used),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
		    ci->i_dirty_caps) {
			dout("flushing dirty caps\n");
			goto ack;
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}
		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
			flushing = __mark_caps_flushing(inode, session);
		else
			flushing = 0;

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
				      want, retain, flushing, NULL);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = 1;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
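
/*
 * Locking in ceph_check_caps() is ordered s_mutex -> snap_rwsem ->
 * i_ceph_lock.  Because the scan starts under i_ceph_lock, the outer
 * locks are only trylocked; on contention we drop i_ceph_lock, take
 * the blocking lock, and restart the cap scan from 'retry'.
 */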
1720
a8599bd8
SW
1721/*
1722 * Try to flush dirty caps back to the auth mds.
1723 */
1724static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1725 unsigned *flush_tid)
1726{
3d14c5d2 1727 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8
SW
1728 struct ceph_inode_info *ci = ceph_inode(inode);
1729 int unlock_session = session ? 0 : 1;
1730 int flushing = 0;
1731
1732retry:
be655596 1733 spin_lock(&ci->i_ceph_lock);
e9964c10
SW
1734 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1735 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1736 goto out;
1737 }
a8599bd8
SW
1738 if (ci->i_dirty_caps && ci->i_auth_cap) {
1739 struct ceph_cap *cap = ci->i_auth_cap;
1740 int used = __ceph_caps_used(ci);
1741 int want = __ceph_caps_wanted(ci);
1742 int delayed;
1743
1744 if (!session) {
be655596 1745 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1746 session = cap->session;
1747 mutex_lock(&session->s_mutex);
1748 goto retry;
1749 }
1750 BUG_ON(session != cap->session);
1751 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1752 goto out;
1753
cdc35f96 1754 flushing = __mark_caps_flushing(inode, session);
a8599bd8 1755
be655596 1756 /* __send_cap drops i_ceph_lock */
a8599bd8
SW
1757 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1758 cap->issued | cap->implemented, flushing,
1759 flush_tid);
1760 if (!delayed)
1761 goto out_unlocked;
1762
be655596 1763 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1764 __cap_delay_requeue(mdsc, ci);
1765 }
1766out:
be655596 1767 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1768out_unlocked:
1769 if (session && unlock_session)
1770 mutex_unlock(&session->s_mutex);
1771 return flushing;
1772}
1773
1774/*
1775 * Return true if we've flushed caps through the given flush_tid.
1776 */
1777static int caps_are_flushed(struct inode *inode, unsigned tid)
1778{
1779 struct ceph_inode_info *ci = ceph_inode(inode);
a5ee751c 1780 int i, ret = 1;
a8599bd8 1781
be655596 1782 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1783 for (i = 0; i < CEPH_CAP_BITS; i++)
1784 if ((ci->i_flushing_caps & (1 << i)) &&
1785 ci->i_cap_flush_tid[i] <= tid) {
1786 /* still flushing this bit */
1787 ret = 0;
1788 break;
1789 }
be655596 1790 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1791 return ret;
1792}
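
/*
 * Illustrative sketch (not part of the original file): a flush kicked
 * off with tid T is complete once no still-flushing cap bit carries
 * i_cap_flush_tid[i] <= T.  Callers below pair this with
 * try_flush_caps(), roughly:
 *
 *	unsigned flush_tid;
 *	int dirty = try_flush_caps(inode, NULL, &flush_tid);
 *
 *	if (dirty)
 *		wait_event_interruptible(ci->i_cap_wq,
 *				caps_are_flushed(inode, flush_tid));
 */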
1793
1794/*
1795 * Wait on any unsafe replies for the given inode. First wait on the
1796 * newest request, and make that the upper bound. Then, if there are
1797 * more requests, keep waiting on the oldest as long as it is still older
1798 * than that upper bound (the newest request's tid at sync time).
1799 */
1800static void sync_write_wait(struct inode *inode)
1801{
1802 struct ceph_inode_info *ci = ceph_inode(inode);
1803 struct list_head *head = &ci->i_unsafe_writes;
1804 struct ceph_osd_request *req;
1805 u64 last_tid;
1806
1807 spin_lock(&ci->i_unsafe_lock);
1808 if (list_empty(head))
1809 goto out;
1810
1811 /* set upper bound as _last_ entry in chain */
1812 req = list_entry(head->prev, struct ceph_osd_request,
1813 r_unsafe_item);
1814 last_tid = req->r_tid;
1815
1816 do {
1817 ceph_osdc_get_request(req);
1818 spin_unlock(&ci->i_unsafe_lock);
1819 dout("sync_write_wait on tid %llu (until %llu)\n",
1820 req->r_tid, last_tid);
1821 wait_for_completion(&req->r_safe_completion);
1822 spin_lock(&ci->i_unsafe_lock);
1823 ceph_osdc_put_request(req);
1824
1825 /*
1826 * from here on look at first entry in chain, since we
1827 * only want to wait for anything older than last_tid
1828 */
1829 if (list_empty(head))
1830 break;
1831 req = list_entry(head->next, struct ceph_osd_request,
1832 r_unsafe_item);
1833 } while (req->r_tid < last_tid);
1834out:
1835 spin_unlock(&ci->i_unsafe_lock);
1836}
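
/*
 * Worked example (hypothetical tids): with unsafe writes 3, 5 and 9
 * pending, we first wait on the newest (9) and record last_tid = 9 as
 * the upper bound; we then repeatedly wait on whatever sits at the
 * head of the chain (3, then 5) while its r_tid is still below 9, so
 * writes issued after the sync point are never waited on.
 */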
1837
02c24a82 1838int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
a8599bd8 1839{
7ea80859 1840 struct inode *inode = file->f_mapping->host;
a8599bd8
SW
1841 struct ceph_inode_info *ci = ceph_inode(inode);
1842 unsigned flush_tid;
1843 int ret;
1844 int dirty;
1845
1846 dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1847 sync_write_wait(inode);
1848
02c24a82 1849 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
a8599bd8
SW
1850 if (ret < 0)
1851 return ret;
02c24a82 1852 mutex_lock(&inode->i_mutex);
a8599bd8
SW
1853
1854 dirty = try_flush_caps(inode, NULL, &flush_tid);
1855 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1856
1857 /*
1858 * only wait on non-file metadata writeback (the mds
1859 * can recover size and mtime, so we don't need to
1860 * wait for that)
1861 */
1862 if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1863 dout("fsync waiting for flush_tid %u\n", flush_tid);
1864 ret = wait_event_interruptible(ci->i_cap_wq,
1865 caps_are_flushed(inode, flush_tid));
1866 }
1867
1868 dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
02c24a82 1869 mutex_unlock(&inode->i_mutex);
a8599bd8
SW
1870 return ret;
1871}
1872
1873/*
1874 * Flush any dirty caps back to the mds. If we aren't asked to wait,
1875 * queue inode for flush but don't do so immediately, because we can
1876 * get by with fewer MDS messages if we wait for data writeback to
1877 * complete first.
1878 */
f1a3d572 1879int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
a8599bd8
SW
1880{
1881 struct ceph_inode_info *ci = ceph_inode(inode);
1882 unsigned flush_tid;
1883 int err = 0;
1884 int dirty;
f1a3d572 1885 int wait = wbc->sync_mode == WB_SYNC_ALL;
a8599bd8
SW
1886
1887 dout("write_inode %p wait=%d\n", inode, wait);
1888 if (wait) {
1889 dirty = try_flush_caps(inode, NULL, &flush_tid);
1890 if (dirty)
1891 err = wait_event_interruptible(ci->i_cap_wq,
1892 caps_are_flushed(inode, flush_tid));
1893 } else {
640ef79d 1894 struct ceph_mds_client *mdsc =
3d14c5d2 1895 ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8 1896
be655596 1897 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1898 if (__ceph_caps_dirty(ci))
1899 __cap_delay_requeue_front(mdsc, ci);
be655596 1900 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1901 }
1902 return err;
1903}
1904
1905/*
1906 * After a recovering MDS goes active, we need to resend any caps
1907 * we were flushing.
1908 *
1909 * Caller holds session->s_mutex.
1910 */
1911static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1912 struct ceph_mds_session *session)
1913{
1914 struct ceph_cap_snap *capsnap;
1915
1916 dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1917 list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1918 flushing_item) {
1919 struct ceph_inode_info *ci = capsnap->ci;
1920 struct inode *inode = &ci->vfs_inode;
1921 struct ceph_cap *cap;
1922
be655596 1923 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1924 cap = ci->i_auth_cap;
1925 if (cap && cap->session == session) {
1926 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1927 cap, capsnap);
e835124c 1928 __ceph_flush_snaps(ci, &session, 1);
a8599bd8
SW
1929 } else {
1930 pr_err("%p auth cap %p not mds%d ???\n", inode,
1931 cap, session->s_mds);
a8599bd8 1932 }
be655596 1933 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1934 }
1935}
1936
1937void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1938 struct ceph_mds_session *session)
1939{
1940 struct ceph_inode_info *ci;
1941
1942 kick_flushing_capsnaps(mdsc, session);
1943
1944 dout("kick_flushing_caps mds%d\n", session->s_mds);
1945 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1946 struct inode *inode = &ci->vfs_inode;
1947 struct ceph_cap *cap;
1948 int delayed = 0;
1949
be655596 1950 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1951 cap = ci->i_auth_cap;
1952 if (cap && cap->session == session) {
1953 dout("kick_flushing_caps %p cap %p %s\n", inode,
1954 cap, ceph_cap_string(ci->i_flushing_caps));
1955 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1956 __ceph_caps_used(ci),
1957 __ceph_caps_wanted(ci),
1958 cap->issued | cap->implemented,
1959 ci->i_flushing_caps, NULL);
1960 if (delayed) {
be655596 1961 spin_lock(&ci->i_ceph_lock);
a8599bd8 1962 __cap_delay_requeue(mdsc, ci);
be655596 1963 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1964 }
1965 } else {
1966 pr_err("%p auth cap %p not mds%d ???\n", inode,
1967 cap, session->s_mds);
be655596 1968 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1969 }
1970 }
1971}
1972
088b3f5e
SW
1973static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1974 struct ceph_mds_session *session,
1975 struct inode *inode)
1976{
1977 struct ceph_inode_info *ci = ceph_inode(inode);
1978 struct ceph_cap *cap;
1979 int delayed = 0;
1980
be655596 1981 spin_lock(&ci->i_ceph_lock);
088b3f5e
SW
1982 cap = ci->i_auth_cap;
1983 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1984 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
1985 __ceph_flush_snaps(ci, &session, 1);
1986 if (ci->i_flushing_caps) {
1987 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1988 __ceph_caps_used(ci),
1989 __ceph_caps_wanted(ci),
1990 cap->issued | cap->implemented,
1991 ci->i_flushing_caps, NULL);
1992 if (delayed) {
be655596 1993 spin_lock(&ci->i_ceph_lock);
088b3f5e 1994 __cap_delay_requeue(mdsc, ci);
be655596 1995 spin_unlock(&ci->i_ceph_lock);
088b3f5e
SW
1996 }
1997 } else {
be655596 1998 spin_unlock(&ci->i_ceph_lock);
088b3f5e
SW
1999 }
2000}
2001
a8599bd8
SW
2002
2003/*
2004 * Take references to capabilities we hold, so that we don't release
2005 * them to the MDS prematurely.
2006 *
be655596 2007 * Protected by i_ceph_lock.
a8599bd8
SW
2008 */
2009static void __take_cap_refs(struct ceph_inode_info *ci, int got)
2010{
2011 if (got & CEPH_CAP_PIN)
2012 ci->i_pin_ref++;
2013 if (got & CEPH_CAP_FILE_RD)
2014 ci->i_rd_ref++;
2015 if (got & CEPH_CAP_FILE_CACHE)
2016 ci->i_rdcache_ref++;
2017 if (got & CEPH_CAP_FILE_WR)
2018 ci->i_wr_ref++;
2019 if (got & CEPH_CAP_FILE_BUFFER) {
d3d0720d 2020 if (ci->i_wb_ref == 0)
3772d26d 2021 ihold(&ci->vfs_inode);
d3d0720d
HC
2022 ci->i_wb_ref++;
2023 dout("__take_cap_refs %p wb %d -> %d (?)\n",
2024 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
a8599bd8
SW
2025 }
2026}
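
/*
 * Illustrative pairing (hypothetical caller; locking shown, error
 * handling elided): a buffered-write path granted FILE_WR|FILE_BUFFER
 * bumps i_wr_ref and i_wb_ref here, and drops both again with
 * ceph_put_cap_refs() once its dirty data has been handed off:
 *
 *	spin_lock(&ci->i_ceph_lock);
 *	__take_cap_refs(ci, CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER);
 *	spin_unlock(&ci->i_ceph_lock);
 *	... dirty some pages ...
 *	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER);
 */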
2027
2028/*
2029 * Try to grab cap references. Specify those refs we @want, and the
2030 * minimal set we @need. Also include the larger offset we are writing
2031 * to (when applicable), and check against max_size here as well.
2032 * Note that caller is responsible for ensuring max_size increases are
2033 * requested from the MDS.
2034 */
2035static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2036 int *got, loff_t endoff, int *check_max, int *err)
2037{
2038 struct inode *inode = &ci->vfs_inode;
2039 int ret = 0;
2040 int have, implemented;
195d3ce2 2041 int file_wanted;
a8599bd8
SW
2042
2043 dout("get_cap_refs %p need %s want %s\n", inode,
2044 ceph_cap_string(need), ceph_cap_string(want));
be655596 2045 spin_lock(&ci->i_ceph_lock);
a8599bd8 2046
195d3ce2
SW
2047 /* make sure file is actually open */
2048 file_wanted = __ceph_caps_file_wanted(ci);
2049 if ((file_wanted & need) == 0) {
2050 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2051 ceph_cap_string(need), ceph_cap_string(file_wanted));
a8599bd8
SW
2052 *err = -EBADF;
2053 ret = 1;
2054 goto out;
2055 }
2056
37505d57
YZ
2057 /* finish pending truncate */
2058 while (ci->i_truncate_pending) {
2059 spin_unlock(&ci->i_ceph_lock);
2060 __ceph_do_pending_vmtruncate(inode, !(need & CEPH_CAP_FILE_WR));
2061 spin_lock(&ci->i_ceph_lock);
2062 }
2063
a8599bd8
SW
2064 if (need & CEPH_CAP_FILE_WR) {
2065 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2066 dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2067 inode, endoff, ci->i_max_size);
2068 if (endoff > ci->i_wanted_max_size) {
2069 *check_max = 1;
2070 ret = 1;
2071 }
2072 goto out;
2073 }
2074 /*
2075 * If a sync write is in progress, we must wait, so that we
2076 * can get a final snapshot value for size+mtime.
2077 */
2078 if (__ceph_have_pending_cap_snap(ci)) {
2079 dout("get_cap_refs %p cap_snap_pending\n", inode);
2080 goto out;
2081 }
2082 }
2083 have = __ceph_caps_issued(ci, &implemented);
2084
a8599bd8
SW
2085 if ((have & need) == need) {
2086 /*
2087 * Look at (implemented & ~have & not) so that we keep waiting
2088 * on transition from wanted -> needed caps. This is needed
2089 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2090 * going before a prior buffered writeback happens.
2091 */
2092 int not = want & ~(have & need);
2093 int revoking = implemented & ~have;
2094 dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2095 inode, ceph_cap_string(have), ceph_cap_string(not),
2096 ceph_cap_string(revoking));
2097 if ((revoking & not) == 0) {
2098 *got = need | (have & want);
2099 __take_cap_refs(ci, *got);
2100 ret = 1;
2101 }
2102 } else {
2103 dout("get_cap_refs %p have %s needed %s\n", inode,
2104 ceph_cap_string(have), ceph_cap_string(need));
2105 }
2106out:
be655596 2107 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2108 dout("get_cap_refs %p ret %d got %s\n", inode,
2109 ret, ceph_cap_string(*got));
2110 return ret;
2111}
2112
2113/*
2114 * Check the offset we are writing up to against our current
2115 * max_size. If necessary, tell the MDS we want to write to
2116 * a larger offset.
2117 */
2118static void check_max_size(struct inode *inode, loff_t endoff)
2119{
2120 struct ceph_inode_info *ci = ceph_inode(inode);
2121 int check = 0;
2122
2123 /* do we need to explicitly request a larger max_size? */
be655596 2124 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2125 if ((endoff >= ci->i_max_size ||
2126 endoff > (inode->i_size << 1)) &&
2127 endoff > ci->i_wanted_max_size) {
2128 dout("write %p at large endoff %llu, req max_size\n",
2129 inode, endoff);
2130 ci->i_wanted_max_size = endoff;
2131 check = 1;
2132 }
be655596 2133 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2134 if (check)
2135 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2136}
2137
2138/*
2139 * Wait for caps, and take cap references. If we can't get a WR cap
2140 * due to a small max_size, make sure we check_max_size (and possibly
2141 * ask the mds) so we don't get hung up indefinitely.
2142 */
2143int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2144 loff_t endoff)
2145{
2146 int check_max, ret, err;
2147
2148retry:
2149 if (endoff > 0)
2150 check_max_size(&ci->vfs_inode, endoff);
2151 check_max = 0;
2152 err = 0;
2153 ret = wait_event_interruptible(ci->i_cap_wq,
2154 try_get_cap_refs(ci, need, want,
2155 got, endoff,
2156 &check_max, &err));
2157 if (err)
2158 ret = err;
2159 if (check_max)
2160 goto retry;
2161 return ret;
2162}
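
/*
 * Typical usage (a sketch; the real callers live in the file I/O
 * paths): a writer needs FILE_WR, would additionally like FILE_BUFFER
 * so it may buffer the data, and passes the end offset of the write so
 * a max_size increase can be requested when necessary:
 *
 *	int got = 0;
 *	int ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR,
 *				CEPH_CAP_FILE_BUFFER, &got, endoff);
 *	if (ret < 0)
 *		return ret;
 *	... do the write, buffered iff (got & CEPH_CAP_FILE_BUFFER) ...
 *	ceph_put_cap_refs(ci, got);
 */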
2163
2164/*
2165 * Take cap refs. Caller must already know we hold at least one ref
2166 * on the caps in question; otherwise taking these refs is not known
2166a * to be safe.
2167 */
2168void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2169{
be655596 2170 spin_lock(&ci->i_ceph_lock);
a8599bd8 2171 __take_cap_refs(ci, caps);
be655596 2172 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2173}
2174
2175/*
2176 * Release cap refs.
2177 *
2178 * If we released the last ref on any given cap, call ceph_check_caps
2179 * to release (or schedule a release).
2180 *
2181 * If we are releasing a WR cap (from a sync write), finalize any affected
2182 * cap_snap, and wake up any waiters.
2183 */
2184void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2185{
2186 struct inode *inode = &ci->vfs_inode;
2187 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2188 struct ceph_cap_snap *capsnap;
2189
be655596 2190 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2191 if (had & CEPH_CAP_PIN)
2192 --ci->i_pin_ref;
2193 if (had & CEPH_CAP_FILE_RD)
2194 if (--ci->i_rd_ref == 0)
2195 last++;
2196 if (had & CEPH_CAP_FILE_CACHE)
2197 if (--ci->i_rdcache_ref == 0)
2198 last++;
2199 if (had & CEPH_CAP_FILE_BUFFER) {
d3d0720d 2200 if (--ci->i_wb_ref == 0) {
a8599bd8
SW
2201 last++;
2202 put++;
2203 }
d3d0720d
HC
2204 dout("put_cap_refs %p wb %d -> %d (?)\n",
2205 inode, ci->i_wb_ref+1, ci->i_wb_ref);
a8599bd8
SW
2206 }
2207 if (had & CEPH_CAP_FILE_WR)
2208 if (--ci->i_wr_ref == 0) {
2209 last++;
2210 if (!list_empty(&ci->i_cap_snaps)) {
2211 capsnap = list_first_entry(&ci->i_cap_snaps,
2212 struct ceph_cap_snap,
2213 ci_item);
2214 if (capsnap->writing) {
2215 capsnap->writing = 0;
2216 flushsnaps =
2217 __ceph_finish_cap_snap(ci,
2218 capsnap);
2219 wake = 1;
2220 }
2221 }
2222 }
be655596 2223 spin_unlock(&ci->i_ceph_lock);
a8599bd8 2224
819ccbfa
SW
2225 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2226 last ? " last" : "", put ? " put" : "");
a8599bd8
SW
2227
2228 if (last && !flushsnaps)
2229 ceph_check_caps(ci, 0, NULL);
2230 else if (flushsnaps)
2231 ceph_flush_snaps(ci);
2232 if (wake)
03066f23 2233 wake_up_all(&ci->i_cap_wq);
a8599bd8
SW
2234 if (put)
2235 iput(inode);
2236}
2237
2238/*
2239 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2240 * context. Adjust per-snap dirty page accounting as appropriate.
2241 * Once all dirty data for a cap_snap is flushed, flush snapped file
2242 * metadata back to the MDS. If we dropped the last ref, call
2243 * ceph_check_caps.
2244 */
2245void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2246 struct ceph_snap_context *snapc)
2247{
2248 struct inode *inode = &ci->vfs_inode;
2249 int last = 0;
819ccbfa
SW
2250 int complete_capsnap = 0;
2251 int drop_capsnap = 0;
a8599bd8
SW
2252 int found = 0;
2253 struct ceph_cap_snap *capsnap = NULL;
2254
be655596 2255 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2256 ci->i_wrbuffer_ref -= nr;
2257 last = !ci->i_wrbuffer_ref;
2258
2259 if (ci->i_head_snapc == snapc) {
2260 ci->i_wrbuffer_ref_head -= nr;
7d8cb26d
SW
2261 if (ci->i_wrbuffer_ref_head == 0 &&
2262 ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
2263 BUG_ON(!ci->i_head_snapc);
a8599bd8
SW
2264 ceph_put_snap_context(ci->i_head_snapc);
2265 ci->i_head_snapc = NULL;
2266 }
2267 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2268 inode,
2269 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2270 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2271 last ? " LAST" : "");
2272 } else {
2273 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2274 if (capsnap->context == snapc) {
2275 found = 1;
a8599bd8
SW
2276 break;
2277 }
2278 }
2279 BUG_ON(!found);
819ccbfa
SW
2280 capsnap->dirty_pages -= nr;
2281 if (capsnap->dirty_pages == 0) {
2282 complete_capsnap = 1;
2283 if (capsnap->dirty == 0)
2284 /* cap writeback completed before we created
2285 * the cap_snap; no FLUSHSNAP is needed */
2286 drop_capsnap = 1;
2287 }
a8599bd8 2288 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
819ccbfa 2289 " snap %lld %d/%d -> %d/%d %s%s%s\n",
a8599bd8
SW
2290 inode, capsnap, capsnap->context->seq,
2291 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2292 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2293 last ? " (wrbuffer last)" : "",
819ccbfa
SW
2294 complete_capsnap ? " (complete capsnap)" : "",
2295 drop_capsnap ? " (drop capsnap)" : "");
2296 if (drop_capsnap) {
2297 ceph_put_snap_context(capsnap->context);
2298 list_del(&capsnap->ci_item);
2299 list_del(&capsnap->flushing_item);
2300 ceph_put_cap_snap(capsnap);
2301 }
a8599bd8
SW
2302 }
2303
be655596 2304 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2305
2306 if (last) {
2307 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2308 iput(inode);
819ccbfa 2309 } else if (complete_capsnap) {
a8599bd8 2310 ceph_flush_snaps(ci);
03066f23 2311 wake_up_all(&ci->i_cap_wq);
a8599bd8 2312 }
819ccbfa
SW
2313 if (drop_capsnap)
2314 iput(inode);
a8599bd8
SW
2315}
2316
2317/*
2318 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2319 * actually be a revocation if it specifies a smaller cap set.)
2320 *
be655596 2321 * caller holds s_mutex and i_ceph_lock, we drop both.
15637c8b 2322 *
a8599bd8
SW
2323 * check_caps values (set locally; the function itself returns void):
2324 * 0 - ok
2325 * 1 - check_caps on auth cap only (writeback)
2326 * 2 - check_caps (ack revoke)
2327 */
15637c8b
SW
2328static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2329 struct ceph_mds_session *session,
2330 struct ceph_cap *cap,
2331 struct ceph_buffer *xattr_buf)
be655596 2332 __releases(ci->i_ceph_lock)
a8599bd8
SW
2333{
2334 struct ceph_inode_info *ci = ceph_inode(inode);
2335 int mds = session->s_mds;
2f56f56a 2336 int seq = le32_to_cpu(grant->seq);
a8599bd8
SW
2337 int newcaps = le32_to_cpu(grant->caps);
2338 int issued, implemented, used, wanted, dirty;
2339 u64 size = le64_to_cpu(grant->size);
2340 u64 max_size = le64_to_cpu(grant->max_size);
2341 struct timespec mtime, atime, ctime;
15637c8b 2342 int check_caps = 0;
a8599bd8
SW
2343 int wake = 0;
2344 int writeback = 0;
2345 int revoked_rdcache = 0;
3c6f6b79 2346 int queue_invalidate = 0;
a8599bd8 2347
2f56f56a
SW
2348 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2349 inode, cap, mds, seq, ceph_cap_string(newcaps));
a8599bd8
SW
2350 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2351 inode->i_size);
2352
2353 /*
2354 * If CACHE is being revoked, and we have no dirty buffers,
2355 * try to invalidate (once). (If there are dirty buffers, we
2356 * will invalidate _after_ writeback.)
2357 */
3b454c49
SW
2358 if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2359 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
bcd2cbd1 2360 !ci->i_wrbuffer_ref) {
5ecad6fd
SW
2361 if (try_nonblocking_invalidate(inode) == 0) {
2362 revoked_rdcache = 1;
2363 } else {
a8599bd8
SW
2364 /* there were locked pages... invalidate later
2365 in a separate thread. */
2366 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
3c6f6b79 2367 queue_invalidate = 1;
a8599bd8
SW
2368 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2369 }
a8599bd8 2370 }
a8599bd8
SW
2371 }
2372
2373 /* side effects now are allowed */
2374
2375 issued = __ceph_caps_issued(ci, &implemented);
2376 issued |= implemented | __ceph_caps_dirty(ci);
2377
685f9a5d 2378 cap->cap_gen = session->s_cap_gen;
a8599bd8
SW
2379
2380 __check_cap_issue(ci, cap, newcaps);
2381
2382 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2383 inode->i_mode = le32_to_cpu(grant->mode);
05cb11c1
EB
2384 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2385 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
a8599bd8 2386 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
bd2bae6a
EB
2387 from_kuid(&init_user_ns, inode->i_uid),
2388 from_kgid(&init_user_ns, inode->i_gid));
a8599bd8
SW
2389 }
2390
2391 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
bfe86848 2392 set_nlink(inode, le32_to_cpu(grant->nlink));
a8599bd8
SW
2393
2394 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2395 int len = le32_to_cpu(grant->xattr_len);
2396 u64 version = le64_to_cpu(grant->xattr_version);
2397
2398 if (version > ci->i_xattrs.version) {
2399 dout(" got new xattrs v%llu on %p len %d\n",
2400 version, inode, len);
2401 if (ci->i_xattrs.blob)
2402 ceph_buffer_put(ci->i_xattrs.blob);
2403 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2404 ci->i_xattrs.version = version;
2405 }
2406 }
2407
2408 /* size/ctime/mtime/atime? */
2409 ceph_fill_file_size(inode, issued,
2410 le32_to_cpu(grant->truncate_seq),
2411 le64_to_cpu(grant->truncate_size), size);
2412 ceph_decode_timespec(&mtime, &grant->mtime);
2413 ceph_decode_timespec(&atime, &grant->atime);
2414 ceph_decode_timespec(&ctime, &grant->ctime);
2415 ceph_fill_file_time(inode, issued,
2416 le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2417 &atime);
2418
2419 /* max size increase? */
5e62ad30 2420 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
a8599bd8
SW
2421 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2422 ci->i_max_size = max_size;
2423 if (max_size >= ci->i_wanted_max_size) {
2424 ci->i_wanted_max_size = 0; /* reset */
2425 ci->i_requested_max_size = 0;
2426 }
2427 wake = 1;
2428 }
2429
2430 /* check cap bits */
2431 wanted = __ceph_caps_wanted(ci);
2432 used = __ceph_caps_used(ci);
2433 dirty = __ceph_caps_dirty(ci);
2434 dout(" my wanted = %s, used = %s, dirty %s\n",
2435 ceph_cap_string(wanted),
2436 ceph_cap_string(used),
2437 ceph_cap_string(dirty));
2438 if (wanted != le32_to_cpu(grant->wanted)) {
2439 dout("mds wanted %s -> %s\n",
2440 ceph_cap_string(le32_to_cpu(grant->wanted)),
2441 ceph_cap_string(wanted));
390306c3
YZ
2442 /* imported cap may not have correct mds_wanted */
2443 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
2444 check_caps = 1;
a8599bd8
SW
2445 }
2446
2447 cap->seq = seq;
2448
2449 /* file layout may have changed */
2450 ci->i_layout = grant->layout;
2451
2452 /* revocation, grant, or no-op? */
2453 if (cap->issued & ~newcaps) {
3b454c49
SW
2454 int revoking = cap->issued & ~newcaps;
2455
2456 dout("revocation: %s -> %s (revoking %s)\n",
2457 ceph_cap_string(cap->issued),
2458 ceph_cap_string(newcaps),
2459 ceph_cap_string(revoking));
0eb6cd49 2460 if (revoking & used & CEPH_CAP_FILE_BUFFER)
3b454c49
SW
2461 writeback = 1; /* initiate writeback; will delay ack */
2462 else if (revoking == CEPH_CAP_FILE_CACHE &&
2463 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2464 queue_invalidate)
2465 ; /* do nothing yet, invalidation will be queued */
2466 else if (cap == ci->i_auth_cap)
2467 check_caps = 1; /* check auth cap only */
2468 else
2469 check_caps = 2; /* check all caps */
a8599bd8 2470 cap->issued = newcaps;
978097c9 2471 cap->implemented |= newcaps;
a8599bd8
SW
2472 } else if (cap->issued == newcaps) {
2473 dout("caps unchanged: %s -> %s\n",
2474 ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2475 } else {
2476 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2477 ceph_cap_string(newcaps));
2478 cap->issued = newcaps;
2479 cap->implemented |= newcaps; /* add bits only, to
2480 * avoid stepping on a
2481 * pending revocation */
2482 wake = 1;
2483 }
978097c9 2484 BUG_ON(cap->issued & ~cap->implemented);
a8599bd8 2485
be655596 2486 spin_unlock(&ci->i_ceph_lock);
3c6f6b79 2487 if (writeback)
a8599bd8
SW
2488 /*
2489 * queue inode for writeback: we can't actually call
2490 * filemap_write_and_wait, etc. from message handler
2491 * context.
2492 */
3c6f6b79
SW
2493 ceph_queue_writeback(inode);
2494 if (queue_invalidate)
2495 ceph_queue_invalidate(inode);
a8599bd8 2496 if (wake)
03066f23 2497 wake_up_all(&ci->i_cap_wq);
15637c8b
SW
2498
2499 if (check_caps == 1)
2500 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2501 session);
2502 else if (check_caps == 2)
2503 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2504 else
2505 mutex_unlock(&session->s_mutex);
a8599bd8
SW
2506}
2507
2508/*
2509 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2510 * MDS has been safely committed.
2511 */
6df058c0 2512static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
2513 struct ceph_mds_caps *m,
2514 struct ceph_mds_session *session,
2515 struct ceph_cap *cap)
be655596 2516 __releases(ci->i_ceph_lock)
a8599bd8
SW
2517{
2518 struct ceph_inode_info *ci = ceph_inode(inode);
3d14c5d2 2519 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8
SW
2520 unsigned seq = le32_to_cpu(m->seq);
2521 int dirty = le32_to_cpu(m->dirty);
2522 int cleaned = 0;
afcdaea3 2523 int drop = 0;
a8599bd8
SW
2524 int i;
2525
2526 for (i = 0; i < CEPH_CAP_BITS; i++)
2527 if ((dirty & (1 << i)) &&
2528 flush_tid == ci->i_cap_flush_tid[i])
2529 cleaned |= 1 << i;
2530
2531 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2532 " flushing %s -> %s\n",
2533 inode, session->s_mds, seq, ceph_cap_string(dirty),
2534 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2535 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2536
2537 if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2538 goto out;
2539
a8599bd8 2540 ci->i_flushing_caps &= ~cleaned;
a8599bd8
SW
2541
2542 spin_lock(&mdsc->cap_dirty_lock);
2543 if (ci->i_flushing_caps == 0) {
2544 list_del_init(&ci->i_flushing_item);
2545 if (!list_empty(&session->s_cap_flushing))
2546 dout(" mds%d still flushing cap on %p\n",
2547 session->s_mds,
2548 &list_entry(session->s_cap_flushing.next,
2549 struct ceph_inode_info,
2550 i_flushing_item)->vfs_inode);
2551 mdsc->num_cap_flushing--;
03066f23 2552 wake_up_all(&mdsc->cap_flushing_wq);
a8599bd8 2553 dout(" inode %p now !flushing\n", inode);
afcdaea3
SW
2554
2555 if (ci->i_dirty_caps == 0) {
2556 dout(" inode %p now clean\n", inode);
2557 BUG_ON(!list_empty(&ci->i_dirty_item));
2558 drop = 1;
7d8cb26d
SW
2559 if (ci->i_wrbuffer_ref_head == 0) {
2560 BUG_ON(!ci->i_head_snapc);
2561 ceph_put_snap_context(ci->i_head_snapc);
2562 ci->i_head_snapc = NULL;
2563 }
76e3b390
SW
2564 } else {
2565 BUG_ON(list_empty(&ci->i_dirty_item));
afcdaea3 2566 }
a8599bd8
SW
2567 }
2568 spin_unlock(&mdsc->cap_dirty_lock);
03066f23 2569 wake_up_all(&ci->i_cap_wq);
a8599bd8
SW
2570
2571out:
be655596 2572 spin_unlock(&ci->i_ceph_lock);
afcdaea3 2573 if (drop)
a8599bd8
SW
2574 iput(inode);
2575}
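
/*
 * Worked example (hypothetical tids): if Fw was marked flushing with
 * i_cap_flush_tid[Fw] == 5 and Ax with tid 7, a FLUSH_ACK carrying
 * tid 5 and dirty == Fw|Ax cleans only Fw; Ax remains in
 * i_flushing_caps until the ack for tid 7 arrives, since the loop
 * above requires an exact tid match per cap bit.
 */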
2576
2577/*
2578 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
2579 * throw away our cap_snap.
2580 *
2581 * Caller holds s_mutex.
2582 */
6df058c0 2583static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
2584 struct ceph_mds_caps *m,
2585 struct ceph_mds_session *session)
2586{
2587 struct ceph_inode_info *ci = ceph_inode(inode);
2588 u64 follows = le64_to_cpu(m->snap_follows);
a8599bd8
SW
2589 struct ceph_cap_snap *capsnap;
2590 int drop = 0;
2591
2592 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2593 inode, ci, session->s_mds, follows);
2594
be655596 2595 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2596 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2597 if (capsnap->follows == follows) {
2598 if (capsnap->flush_tid != flush_tid) {
2599 dout(" cap_snap %p follows %lld tid %lld !="
2600 " %lld\n", capsnap, follows,
2601 flush_tid, capsnap->flush_tid);
2602 break;
2603 }
2604 WARN_ON(capsnap->dirty_pages || capsnap->writing);
819ccbfa
SW
2605 dout(" removing %p cap_snap %p follows %lld\n",
2606 inode, capsnap, follows);
a8599bd8
SW
2607 ceph_put_snap_context(capsnap->context);
2608 list_del(&capsnap->ci_item);
2609 list_del(&capsnap->flushing_item);
2610 ceph_put_cap_snap(capsnap);
2611 drop = 1;
2612 break;
2613 } else {
2614 dout(" skipping cap_snap %p follows %lld\n",
2615 capsnap, capsnap->follows);
2616 }
2617 }
be655596 2618 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2619 if (drop)
2620 iput(inode);
2621}
2622
2623/*
2624 * Handle TRUNC from MDS, indicating file truncation.
2625 *
2626 * caller holds s_mutex.
2627 */
2628static void handle_cap_trunc(struct inode *inode,
2629 struct ceph_mds_caps *trunc,
2630 struct ceph_mds_session *session)
be655596 2631 __releases(ci->i_ceph_lock)
a8599bd8
SW
2632{
2633 struct ceph_inode_info *ci = ceph_inode(inode);
2634 int mds = session->s_mds;
2635 int seq = le32_to_cpu(trunc->seq);
2636 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2637 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2638 u64 size = le64_to_cpu(trunc->size);
2639 int implemented = 0;
2640 int dirty = __ceph_caps_dirty(ci);
2641 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
2642 int queue_trunc = 0;
2643
2644 issued |= implemented | dirty;
2645
2646 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2647 inode, mds, seq, truncate_size, truncate_seq);
2648 queue_trunc = ceph_fill_file_size(inode, issued,
2649 truncate_seq, truncate_size, size);
be655596 2650 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2651
2652 if (queue_trunc)
3c6f6b79 2653 ceph_queue_vmtruncate(inode);
a8599bd8
SW
2654}
2655
2656/*
2657 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
2658 * different one. If we are the most recent migration we've seen (as
2659 * indicated by mseq), make note of the migrating cap bits for the
2660 * duration (until we see the corresponding IMPORT).
2661 *
2662 * caller holds s_mutex
2663 */
2664static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
154f42c2
SW
2665 struct ceph_mds_session *session,
2666 int *open_target_sessions)
a8599bd8 2667{
db354052 2668 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
a8599bd8
SW
2669 struct ceph_inode_info *ci = ceph_inode(inode);
2670 int mds = session->s_mds;
2671 unsigned mseq = le32_to_cpu(ex->migrate_seq);
2672 struct ceph_cap *cap = NULL, *t;
2673 struct rb_node *p;
2674 int remember = 1;
2675
2676 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2677 inode, ci, mds, mseq);
2678
be655596 2679 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2680
2681 /* make sure we haven't seen a higher mseq */
2682 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2683 t = rb_entry(p, struct ceph_cap, ci_node);
2684 if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2685 dout(" higher mseq on cap from mds%d\n",
2686 t->session->s_mds);
2687 remember = 0;
2688 }
2689 if (t->session->s_mds == mds)
2690 cap = t;
2691 }
2692
2693 if (cap) {
2694 if (remember) {
2695 /* make note */
2696 ci->i_cap_exporting_mds = mds;
2697 ci->i_cap_exporting_mseq = mseq;
2698 ci->i_cap_exporting_issued = cap->issued;
154f42c2
SW
2699
2700 /*
2701 * make sure we have open sessions with all possible
2702 * export targets, so that we get the matching IMPORT
2703 */
2704 *open_target_sessions = 1;
db354052
SW
2705
2706 /*
2707 * we can't flush dirty caps that we've seen the
2708 * EXPORT but no IMPORT for
2709 */
2710 spin_lock(&mdsc->cap_dirty_lock);
2711 if (!list_empty(&ci->i_dirty_item)) {
2712 dout(" moving %p to cap_dirty_migrating\n",
2713 inode);
2714 list_move(&ci->i_dirty_item,
2715 &mdsc->cap_dirty_migrating);
2716 }
2717 spin_unlock(&mdsc->cap_dirty_lock);
a8599bd8 2718 }
7c1332b8 2719 __ceph_remove_cap(cap);
a8599bd8 2720 }
4ea0043a 2721 /* else, we already released it */
a8599bd8 2722
be655596 2723 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2724}
2725
2726/*
2727 * Handle cap IMPORT. If there are temp bits from an older EXPORT,
2728 * clean them up.
2729 *
2730 * caller holds s_mutex.
2731 */
2732static void handle_cap_import(struct ceph_mds_client *mdsc,
2733 struct inode *inode, struct ceph_mds_caps *im,
2734 struct ceph_mds_session *session,
2735 void *snaptrace, int snaptrace_len)
2736{
2737 struct ceph_inode_info *ci = ceph_inode(inode);
2738 int mds = session->s_mds;
2739 unsigned issued = le32_to_cpu(im->caps);
2740 unsigned wanted = le32_to_cpu(im->wanted);
2741 unsigned seq = le32_to_cpu(im->seq);
2742 unsigned mseq = le32_to_cpu(im->migrate_seq);
2743 u64 realmino = le64_to_cpu(im->realm);
2744 u64 cap_id = le64_to_cpu(im->cap_id);
2745
2746 if (ci->i_cap_exporting_mds >= 0 &&
2747 ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2748 dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2749 " - cleared exporting from mds%d\n",
2750 inode, ci, mds, mseq,
2751 ci->i_cap_exporting_mds);
2752 ci->i_cap_exporting_issued = 0;
2753 ci->i_cap_exporting_mseq = 0;
2754 ci->i_cap_exporting_mds = -1;
db354052
SW
2755
2756 spin_lock(&mdsc->cap_dirty_lock);
2757 if (!list_empty(&ci->i_dirty_item)) {
2758 dout(" moving %p back to cap_dirty\n", inode);
2759 list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
2760 }
2761 spin_unlock(&mdsc->cap_dirty_lock);
a8599bd8
SW
2762 } else {
2763 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2764 inode, ci, mds, mseq);
2765 }
2766
2767 down_write(&mdsc->snap_rwsem);
2768 ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2769 false);
2770 downgrade_write(&mdsc->snap_rwsem);
2771 ceph_add_cap(inode, session, cap_id, -1,
2772 issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2773 NULL /* no caps context */);
088b3f5e 2774 kick_flushing_inode_caps(mdsc, session, inode);
a8599bd8 2775 up_read(&mdsc->snap_rwsem);
feb4cc9b
SW
2776
2777 /* make sure we re-request max_size, if necessary */
be655596 2778 spin_lock(&ci->i_ceph_lock);
0e5e1774 2779 ci->i_wanted_max_size = 0; /* reset */
feb4cc9b 2780 ci->i_requested_max_size = 0;
be655596 2781 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2782}
2783
2784/*
2785 * Handle a caps message from the MDS.
2786 *
2787 * Identify the appropriate session, inode, and call the right handler
2788 * based on the cap op.
2789 */
2790void ceph_handle_caps(struct ceph_mds_session *session,
2791 struct ceph_msg *msg)
2792{
2793 struct ceph_mds_client *mdsc = session->s_mdsc;
3d14c5d2 2794 struct super_block *sb = mdsc->fsc->sb;
a8599bd8 2795 struct inode *inode;
be655596 2796 struct ceph_inode_info *ci;
a8599bd8
SW
2797 struct ceph_cap *cap;
2798 struct ceph_mds_caps *h;
2600d2dd 2799 int mds = session->s_mds;
a8599bd8 2800 int op;
3d7ded4d 2801 u32 seq, mseq;
a8599bd8
SW
2802 struct ceph_vino vino;
2803 u64 cap_id;
2804 u64 size, max_size;
6df058c0 2805 u64 tid;
70edb55b 2806 void *snaptrace;
ce1fbc8d
SW
2807 size_t snaptrace_len;
2808 void *flock;
2809 u32 flock_len;
154f42c2 2810 int open_target_sessions = 0;
a8599bd8
SW
2811
2812 dout("handle_caps from mds%d\n", mds);
2813
2814 /* decode */
6df058c0 2815 tid = le64_to_cpu(msg->hdr.tid);
a8599bd8
SW
2816 if (msg->front.iov_len < sizeof(*h))
2817 goto bad;
2818 h = msg->front.iov_base;
2819 op = le32_to_cpu(h->op);
2820 vino.ino = le64_to_cpu(h->ino);
2821 vino.snap = CEPH_NOSNAP;
2822 cap_id = le64_to_cpu(h->cap_id);
2823 seq = le32_to_cpu(h->seq);
3d7ded4d 2824 mseq = le32_to_cpu(h->migrate_seq);
a8599bd8
SW
2825 size = le64_to_cpu(h->size);
2826 max_size = le64_to_cpu(h->max_size);
2827
ce1fbc8d
SW
2828 snaptrace = h + 1;
2829 snaptrace_len = le32_to_cpu(h->snap_trace_len);
2830
2831 if (le16_to_cpu(msg->hdr.version) >= 2) {
2832 void *p, *end;
2833
2834 p = snaptrace + snaptrace_len;
2835 end = msg->front.iov_base + msg->front.iov_len;
2836 ceph_decode_32_safe(&p, end, flock_len, bad);
2837 flock = p;
2838 } else {
2839 flock = NULL;
2840 flock_len = 0;
2841 }
2842
a8599bd8
SW
2843 mutex_lock(&session->s_mutex);
2844 session->s_seq++;
2845 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2846 (unsigned)seq);
2847
66f58691
YZ
2848 if (op == CEPH_CAP_OP_IMPORT)
2849 ceph_add_cap_releases(mdsc, session);
2850
a8599bd8
SW
2851 /* lookup ino */
2852 inode = ceph_find_inode(sb, vino);
be655596 2853 ci = ceph_inode(inode);
a8599bd8
SW
2854 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2855 vino.snap, inode);
2856 if (!inode) {
2857 dout(" i don't have ino %llx\n", vino.ino);
3d7ded4d
SW
2858
2859 if (op == CEPH_CAP_OP_IMPORT)
2860 __queue_cap_release(session, vino.ino, cap_id,
2861 mseq, seq);
21b559de 2862 goto flush_cap_releases;
a8599bd8
SW
2863 }
2864
2865 /* these will work even if we don't have a cap yet */
2866 switch (op) {
2867 case CEPH_CAP_OP_FLUSHSNAP_ACK:
6df058c0 2868 handle_cap_flushsnap_ack(inode, tid, h, session);
a8599bd8
SW
2869 goto done;
2870
2871 case CEPH_CAP_OP_EXPORT:
154f42c2 2872 handle_cap_export(inode, h, session, &open_target_sessions);
a8599bd8
SW
2873 goto done;
2874
2875 case CEPH_CAP_OP_IMPORT:
2876 handle_cap_import(mdsc, inode, h, session,
ce1fbc8d 2877 snaptrace, snaptrace_len);
a8599bd8
SW
2878 }
2879
2880 /* the rest require a cap */
be655596 2881 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2882 cap = __get_cap_for_mds(ceph_inode(inode), mds);
2883 if (!cap) {
9dbd412f 2884 dout(" no cap on %p ino %llx.%llx from mds%d\n",
a8599bd8 2885 inode, ceph_ino(inode), ceph_snap(inode), mds);
be655596 2886 spin_unlock(&ci->i_ceph_lock);
21b559de 2887 goto flush_cap_releases;
a8599bd8
SW
2888 }
2889
be655596 2890 /* note that each of these drops i_ceph_lock for us */
a8599bd8
SW
2891 switch (op) {
2892 case CEPH_CAP_OP_REVOKE:
2893 case CEPH_CAP_OP_GRANT:
0e5e1774 2894 case CEPH_CAP_OP_IMPORT:
15637c8b
SW
2895 handle_cap_grant(inode, h, session, cap, msg->middle);
2896 goto done_unlocked;
a8599bd8
SW
2897
2898 case CEPH_CAP_OP_FLUSH_ACK:
6df058c0 2899 handle_cap_flush_ack(inode, tid, h, session, cap);
a8599bd8
SW
2900 break;
2901
2902 case CEPH_CAP_OP_TRUNC:
2903 handle_cap_trunc(inode, h, session);
2904 break;
2905
2906 default:
be655596 2907 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2908 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2909 ceph_cap_op_name(op));
2910 }
2911
21b559de
GF
2912 goto done;
2913
2914flush_cap_releases:
2915 /*
2916 * send any full release message to try to move things
2917 * along for the mds (who clearly thinks we still have this
2918 * cap).
2919 */
2920 ceph_add_cap_releases(mdsc, session);
2921 ceph_send_cap_releases(mdsc, session);
2922
a8599bd8 2923done:
15637c8b
SW
2924 mutex_unlock(&session->s_mutex);
2925done_unlocked:
a8599bd8
SW
2926 if (inode)
2927 iput(inode);
154f42c2
SW
2928 if (open_target_sessions)
2929 ceph_mdsc_open_export_target_sessions(mdsc, session);
a8599bd8
SW
2930 return;
2931
2932bad:
2933 pr_err("ceph_handle_caps: corrupt message\n");
9ec7cab1 2934 ceph_msg_dump(msg);
a8599bd8
SW
2935 return;
2936}
2937
2938/*
2939 * Delayed work handler to process end of delayed cap release LRU list.
2940 */
afcdaea3 2941void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
a8599bd8
SW
2942{
2943 struct ceph_inode_info *ci;
2944 int flags = CHECK_CAPS_NODELAY;
2945
a8599bd8
SW
2946 dout("check_delayed_caps\n");
2947 while (1) {
2948 spin_lock(&mdsc->cap_delay_lock);
2949 if (list_empty(&mdsc->cap_delay_list))
2950 break;
2951 ci = list_first_entry(&mdsc->cap_delay_list,
2952 struct ceph_inode_info,
2953 i_cap_delay_list);
2954 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
2955 time_before(jiffies, ci->i_hold_caps_max))
2956 break;
2957 list_del_init(&ci->i_cap_delay_list);
2958 spin_unlock(&mdsc->cap_delay_lock);
2959 dout("check_delayed_caps on %p\n", &ci->vfs_inode);
2960 ceph_check_caps(ci, flags, NULL);
2961 }
2962 spin_unlock(&mdsc->cap_delay_lock);
2963}
2964
afcdaea3
SW
2965/*
2966 * Flush all dirty caps to the mds
2967 */
2968void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2969{
db354052
SW
2970 struct ceph_inode_info *ci;
2971 struct inode *inode;
afcdaea3
SW
2972
2973 dout("flush_dirty_caps\n");
2974 spin_lock(&mdsc->cap_dirty_lock);
db354052
SW
2975 while (!list_empty(&mdsc->cap_dirty)) {
2976 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2977 i_dirty_item);
70b666c3
SW
2978 inode = &ci->vfs_inode;
2979 ihold(inode);
db354052 2980 dout("flush_dirty_caps %p\n", inode);
afcdaea3 2981 spin_unlock(&mdsc->cap_dirty_lock);
70b666c3
SW
2982 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
2983 iput(inode);
afcdaea3
SW
2984 spin_lock(&mdsc->cap_dirty_lock);
2985 }
2986 spin_unlock(&mdsc->cap_dirty_lock);
db354052 2987 dout("flush_dirty_caps done\n");
afcdaea3
SW
2988}
2989
a8599bd8
SW
2990/*
2991 * Drop open file reference. If we were the last open file,
2992 * we may need to release capabilities to the MDS (or schedule
2993 * their delayed release).
2994 */
2995void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2996{
2997 struct inode *inode = &ci->vfs_inode;
2998 int last = 0;
2999
be655596 3000 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
3001 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
3002 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
3003 BUG_ON(ci->i_nr_by_mode[fmode] == 0);
3004 if (--ci->i_nr_by_mode[fmode] == 0)
3005 last++;
be655596 3006 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
3007
3008 if (last && ci->i_vino.snap == CEPH_NOSNAP)
3009 ceph_check_caps(ci, 0, NULL);
3010}
3011
3012/*
3013 * Helpers for embedding cap and dentry lease releases into mds
3014 * requests.
3015 *
3016 * @force is used by dentry_release (below) to force inclusion of a
3017 * record for the directory inode, even when there aren't any caps to
3018 * drop.
3019 */
3020int ceph_encode_inode_release(void **p, struct inode *inode,
3021 int mds, int drop, int unless, int force)
3022{
3023 struct ceph_inode_info *ci = ceph_inode(inode);
3024 struct ceph_cap *cap;
3025 struct ceph_mds_request_release *rel = *p;
ec97f88b 3026 int used, dirty;
a8599bd8 3027 int ret = 0;
a8599bd8 3028
be655596 3029 spin_lock(&ci->i_ceph_lock);
916623da 3030 used = __ceph_caps_used(ci);
ec97f88b 3031 dirty = __ceph_caps_dirty(ci);
916623da 3032
ec97f88b
SW
3033 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3034 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
916623da
SW
3035 ceph_cap_string(unless));
3036
ec97f88b
SW
3037 /* only drop unused, clean caps */
3038 drop &= ~(used | dirty);
916623da 3039
a8599bd8
SW
3040 cap = __get_cap_for_mds(ci, mds);
3041 if (cap && __cap_is_valid(cap)) {
3042 if (force ||
3043 ((cap->issued & drop) &&
3044 (cap->issued & unless) == 0)) {
3045 if ((cap->issued & drop) &&
3046 (cap->issued & unless) == 0) {
bb137f84
YZ
3047 int wanted = __ceph_caps_wanted(ci);
3048 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3049 wanted |= cap->mds_wanted;
3050 dout("encode_inode_release %p cap %p "
3051 "%s -> %s, wanted %s -> %s\n", inode, cap,
a8599bd8 3052 ceph_cap_string(cap->issued),
bb137f84
YZ
3053 ceph_cap_string(cap->issued & ~drop),
3054 ceph_cap_string(cap->mds_wanted),
3055 ceph_cap_string(wanted));
3056
a8599bd8
SW
3057 cap->issued &= ~drop;
3058 cap->implemented &= ~drop;
bb137f84 3059 cap->mds_wanted = wanted;
a8599bd8
SW
3060 } else {
3061 dout("encode_inode_release %p cap %p %s"
3062 " (force)\n", inode, cap,
3063 ceph_cap_string(cap->issued));
3064 }
3065
3066 rel->ino = cpu_to_le64(ceph_ino(inode));
3067 rel->cap_id = cpu_to_le64(cap->cap_id);
3068 rel->seq = cpu_to_le32(cap->seq);
3069 rel->issue_seq = cpu_to_le32(cap->issue_seq);
3070 rel->mseq = cpu_to_le32(cap->mseq);
3071 rel->caps = cpu_to_le32(cap->issued);
3072 rel->wanted = cpu_to_le32(cap->mds_wanted);
3073 rel->dname_len = 0;
3074 rel->dname_seq = 0;
3075 *p += sizeof(*rel);
3076 ret = 1;
3077 } else {
3078 dout("encode_inode_release %p cap %p %s\n",
3079 inode, cap, ceph_cap_string(cap->issued));
3080 }
3081 }
be655596 3082 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
3083 return ret;
3084}
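
/*
 * Sketch of a caller (hypothetical values): to tell the MDS we release
 * our shared file caps on a directory unless we also hold the
 * exclusive bit, a request would encode roughly
 *
 *	ret = ceph_encode_inode_release(&p, dir, mds,
 *					CEPH_CAP_FILE_SHARED,
 *					CEPH_CAP_FILE_EXCL, 0);
 *
 * ret == 1 means a release record was written at *p.  Because drop is
 * masked by (used | dirty) above, only unused, clean caps are ever
 * actually dropped.
 */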
3085
3086int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3087 int mds, int drop, int unless)
3088{
3089 struct inode *dir = dentry->d_parent->d_inode;
3090 struct ceph_mds_request_release *rel = *p;
3091 struct ceph_dentry_info *di = ceph_dentry(dentry);
3092 int force = 0;
3093 int ret;
3094
3095 /*
3096 * force a record for the directory caps if we have a dentry lease.
be655596 3097 * this is racy (can't take i_ceph_lock and d_lock together), but it
a8599bd8
SW
3098 * doesn't have to be perfect; the mds will revoke anything we don't
3099 * release.
3100 */
3101 spin_lock(&dentry->d_lock);
3102 if (di->lease_session && di->lease_session->s_mds == mds)
3103 force = 1;
3104 spin_unlock(&dentry->d_lock);
3105
3106 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3107
3108 spin_lock(&dentry->d_lock);
3109 if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3110 dout("encode_dentry_release %p mds%d seq %d\n",
3111 dentry, mds, (int)di->lease_seq);
3112 rel->dname_len = cpu_to_le32(dentry->d_name.len);
3113 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3114 *p += dentry->d_name.len;
3115 rel->dname_seq = cpu_to_le32(di->lease_seq);
1dadcce3 3116 __ceph_mdsc_drop_dentry_lease(dentry);
a8599bd8
SW
3117 }
3118 spin_unlock(&dentry->d_lock);
3119 return ret;
3120}