fs/ceph/caps.c (ceph: move dirty inode to migrating list when clearing auth caps)
 1#include <linux/ceph/ceph_debug.h>
2
3#include <linux/fs.h>
4#include <linux/kernel.h>
5#include <linux/sched.h>
 6#include <linux/slab.h>
7#include <linux/vmalloc.h>
8#include <linux/wait.h>
 9#include <linux/writeback.h>
10
11#include "super.h"
12#include "mds_client.h"
13#include <linux/ceph/decode.h>
14#include <linux/ceph/messenger.h>
15
16/*
17 * Capability management
18 *
19 * The Ceph metadata servers control client access to inode metadata
20 * and file data by issuing capabilities, granting clients permission
 21 * to read and/or write both inode fields and file data to OSDs
22 * (storage nodes). Each capability consists of a set of bits
23 * indicating which operations are allowed.
24 *
25 * If the client holds a *_SHARED cap, the client has a coherent value
26 * that can be safely read from the cached inode.
27 *
 28 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
29 * client is allowed to change inode attributes (e.g., file size,
30 * mtime), note its dirty state in the ceph_cap, and asynchronously
31 * flush that metadata change to the MDS.
32 *
33 * In the event of a conflicting operation (perhaps by another
34 * client), the MDS will revoke the conflicting client capabilities.
35 *
36 * In order for a client to cache an inode, it must hold a capability
37 * with at least one MDS server. When inodes are released, release
38 * notifications are batched and periodically sent en masse to the MDS
39 * cluster to release server state.
40 */
41
42
43/*
44 * Generate readable cap strings for debugging output.
45 */
46#define MAX_CAP_STR 20
47static char cap_str[MAX_CAP_STR][40];
48static DEFINE_SPINLOCK(cap_str_lock);
49static int last_cap_str;
50
51static char *gcap_string(char *s, int c)
52{
53 if (c & CEPH_CAP_GSHARED)
54 *s++ = 's';
55 if (c & CEPH_CAP_GEXCL)
56 *s++ = 'x';
57 if (c & CEPH_CAP_GCACHE)
58 *s++ = 'c';
59 if (c & CEPH_CAP_GRD)
60 *s++ = 'r';
61 if (c & CEPH_CAP_GWR)
62 *s++ = 'w';
63 if (c & CEPH_CAP_GBUFFER)
64 *s++ = 'b';
65 if (c & CEPH_CAP_GLAZYIO)
66 *s++ = 'l';
67 return s;
68}
69
70const char *ceph_cap_string(int caps)
71{
72 int i;
73 char *s;
74 int c;
75
76 spin_lock(&cap_str_lock);
77 i = last_cap_str++;
78 if (last_cap_str == MAX_CAP_STR)
79 last_cap_str = 0;
80 spin_unlock(&cap_str_lock);
81
82 s = cap_str[i];
83
84 if (caps & CEPH_CAP_PIN)
85 *s++ = 'p';
86
87 c = (caps >> CEPH_CAP_SAUTH) & 3;
88 if (c) {
89 *s++ = 'A';
90 s = gcap_string(s, c);
91 }
92
93 c = (caps >> CEPH_CAP_SLINK) & 3;
94 if (c) {
95 *s++ = 'L';
96 s = gcap_string(s, c);
97 }
98
99 c = (caps >> CEPH_CAP_SXATTR) & 3;
100 if (c) {
101 *s++ = 'X';
102 s = gcap_string(s, c);
103 }
104
105 c = caps >> CEPH_CAP_SFILE;
106 if (c) {
107 *s++ = 'F';
108 s = gcap_string(s, c);
109 }
110
111 if (s == cap_str[i])
112 *s++ = '-';
113 *s = 0;
114 return cap_str[i];
115}
116
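/*
 * Initialize the global cap pool: an empty caps_list plus the lock that
 * guards it and the caps_* counters.
 */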
 117void ceph_caps_init(struct ceph_mds_client *mdsc)
 118{
119 INIT_LIST_HEAD(&mdsc->caps_list);
120 spin_lock_init(&mdsc->caps_list_lock);
121}
122
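/*
 * Tear down the cap pool: free every preallocated cap still on caps_list
 * and reset all of the pool counters.
 */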
 123void ceph_caps_finalize(struct ceph_mds_client *mdsc)
124{
125 struct ceph_cap *cap;
126
127 spin_lock(&mdsc->caps_list_lock);
128 while (!list_empty(&mdsc->caps_list)) {
129 cap = list_first_entry(&mdsc->caps_list,
130 struct ceph_cap, caps_item);
131 list_del(&cap->caps_item);
132 kmem_cache_free(ceph_cap_cachep, cap);
133 }
134 mdsc->caps_total_count = 0;
135 mdsc->caps_avail_count = 0;
136 mdsc->caps_use_count = 0;
137 mdsc->caps_reserve_count = 0;
138 mdsc->caps_min_count = 0;
139 spin_unlock(&mdsc->caps_list_lock);
140}
141
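/*
 * Adjust caps_min_count, the floor of preallocated caps we try to keep
 * around (delta may be negative); ceph_put_cap() consults this value.
 */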
 142void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
 143{
144 spin_lock(&mdsc->caps_list_lock);
145 mdsc->caps_min_count += delta;
146 BUG_ON(mdsc->caps_min_count < 0);
147 spin_unlock(&mdsc->caps_list_lock);
148}
149
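/*
 * Reserve @need caps for @ctx: take what we can from the preallocated
 * pool, then allocate the rest.  Returns 0 on success or -ENOMEM if an
 * allocation fails.
 */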
150int ceph_reserve_caps(struct ceph_mds_client *mdsc,
151 struct ceph_cap_reservation *ctx, int need)
152{
153 int i;
154 struct ceph_cap *cap;
155 int have;
156 int alloc = 0;
157 LIST_HEAD(newcaps);
158 int ret = 0;
159
160 dout("reserve caps ctx=%p need=%d\n", ctx, need);
161
162 /* first reserve any caps that are already allocated */
163 spin_lock(&mdsc->caps_list_lock);
164 if (mdsc->caps_avail_count >= need)
165 have = need;
166 else
167 have = mdsc->caps_avail_count;
168 mdsc->caps_avail_count -= have;
169 mdsc->caps_reserve_count += have;
170 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
171 mdsc->caps_reserve_count +
172 mdsc->caps_avail_count);
173 spin_unlock(&mdsc->caps_list_lock);
174
175 for (i = have; i < need; i++) {
176 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
177 if (!cap) {
178 ret = -ENOMEM;
179 goto out_alloc_count;
180 }
181 list_add(&cap->caps_item, &newcaps);
182 alloc++;
183 }
184 BUG_ON(have + alloc != need);
185
186 spin_lock(&mdsc->caps_list_lock);
187 mdsc->caps_total_count += alloc;
188 mdsc->caps_reserve_count += alloc;
189 list_splice(&newcaps, &mdsc->caps_list);
 190
191 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
192 mdsc->caps_reserve_count +
193 mdsc->caps_avail_count);
194 spin_unlock(&mdsc->caps_list_lock);
195
196 ctx->count = need;
197 dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
198 ctx, mdsc->caps_total_count, mdsc->caps_use_count,
199 mdsc->caps_reserve_count, mdsc->caps_avail_count);
200 return 0;
201
202out_alloc_count:
203 /* we didn't manage to reserve as much as we needed */
204 pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
205 ctx, need, have);
206 return ret;
207}
208
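/*
 * Return any caps still reserved by @ctx to the available pool.
 */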
209int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
210 struct ceph_cap_reservation *ctx)
211{
212 dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
213 if (ctx->count) {
214 spin_lock(&mdsc->caps_list_lock);
215 BUG_ON(mdsc->caps_reserve_count < ctx->count);
216 mdsc->caps_reserve_count -= ctx->count;
217 mdsc->caps_avail_count += ctx->count;
218 ctx->count = 0;
219 dout("unreserve caps %d = %d used + %d resv + %d avail\n",
220 mdsc->caps_total_count, mdsc->caps_use_count,
221 mdsc->caps_reserve_count, mdsc->caps_avail_count);
222 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
223 mdsc->caps_reserve_count +
224 mdsc->caps_avail_count);
225 spin_unlock(&mdsc->caps_list_lock);
226 }
227 return 0;
228}
229
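/*
 * Take one cap, either from the caller's reservation (@ctx) or, if @ctx
 * is NULL, by allocating one directly and counting it as in use.
 */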
230static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
231 struct ceph_cap_reservation *ctx)
232{
233 struct ceph_cap *cap = NULL;
234
235 /* temporary, until we do something about cap import/export */
236 if (!ctx) {
237 cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
238 if (cap) {
 239 spin_lock(&mdsc->caps_list_lock);
240 mdsc->caps_use_count++;
241 mdsc->caps_total_count++;
 242 spin_unlock(&mdsc->caps_list_lock);
243 }
244 return cap;
245 }
 246
 247 spin_lock(&mdsc->caps_list_lock);
 248 dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
249 ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
250 mdsc->caps_reserve_count, mdsc->caps_avail_count);
 251 BUG_ON(!ctx->count);
252 BUG_ON(ctx->count > mdsc->caps_reserve_count);
253 BUG_ON(list_empty(&mdsc->caps_list));
254
255 ctx->count--;
256 mdsc->caps_reserve_count--;
257 mdsc->caps_use_count++;
 258
 259 cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
260 list_del(&cap->caps_item);
261
262 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
263 mdsc->caps_reserve_count + mdsc->caps_avail_count);
264 spin_unlock(&mdsc->caps_list_lock);
265 return cap;
266}
267
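/*
 * Drop a cap: keep it on the available list if we are below the
 * reserve + minimum target, otherwise free it.
 */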
 268void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
 269{
 270 spin_lock(&mdsc->caps_list_lock);
 271 dout("put_cap %p %d = %d used + %d resv + %d avail\n",
272 cap, mdsc->caps_total_count, mdsc->caps_use_count,
273 mdsc->caps_reserve_count, mdsc->caps_avail_count);
274 mdsc->caps_use_count--;
 275 /*
276 * Keep some preallocated caps around (ceph_min_count), to
277 * avoid lots of free/alloc churn.
 278 */
279 if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
280 mdsc->caps_min_count) {
281 mdsc->caps_total_count--;
282 kmem_cache_free(ceph_cap_cachep, cap);
283 } else {
284 mdsc->caps_avail_count++;
285 list_add(&cap->caps_item, &mdsc->caps_list);
286 }
287
288 BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
289 mdsc->caps_reserve_count + mdsc->caps_avail_count);
290 spin_unlock(&mdsc->caps_list_lock);
291}
292
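/*
 * Report the current cap pool counters; any of the out-pointers may be
 * NULL.
 */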
 293void ceph_reservation_status(struct ceph_fs_client *fsc,
294 int *total, int *avail, int *used, int *reserved,
295 int *min)
 296{
 297 struct ceph_mds_client *mdsc = fsc->mdsc;
 298
 299 if (total)
 300 *total = mdsc->caps_total_count;
 301 if (avail)
 302 *avail = mdsc->caps_avail_count;
 303 if (used)
 304 *used = mdsc->caps_use_count;
 305 if (reserved)
 306 *reserved = mdsc->caps_reserve_count;
 307 if (min)
 308 *min = mdsc->caps_min_count;
309}
310
311/*
312 * Find ceph_cap for given mds, if any.
313 *
 314 * Called with i_ceph_lock held.
315 */
316static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
317{
318 struct ceph_cap *cap;
319 struct rb_node *n = ci->i_caps.rb_node;
320
321 while (n) {
322 cap = rb_entry(n, struct ceph_cap, ci_node);
323 if (mds < cap->mds)
324 n = n->rb_left;
325 else if (mds > cap->mds)
326 n = n->rb_right;
327 else
328 return cap;
329 }
330 return NULL;
331}
332
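/*
 * Like __get_cap_for_mds(), but takes and drops i_ceph_lock itself.
 */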
333struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
334{
335 struct ceph_cap *cap;
336
 337 spin_lock(&ci->i_ceph_lock);
 338 cap = __get_cap_for_mds(ci, mds);
 339 spin_unlock(&ci->i_ceph_lock);
340 return cap;
341}
342
 343/*
 344 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 345 */
 346static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
347{
348 struct ceph_cap *cap;
349 int mds = -1;
350 struct rb_node *p;
351
 352 /* prefer mds with WR|BUFFER|EXCL caps */
353 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
354 cap = rb_entry(p, struct ceph_cap, ci_node);
355 mds = cap->mds;
356 if (cap->issued & (CEPH_CAP_FILE_WR |
357 CEPH_CAP_FILE_BUFFER |
358 CEPH_CAP_FILE_EXCL))
359 break;
360 }
361 return mds;
362}
363
364int ceph_get_cap_mds(struct inode *inode)
365{
 366 struct ceph_inode_info *ci = ceph_inode(inode);
 367 int mds;
 368 spin_lock(&ci->i_ceph_lock);
 369 mds = __ceph_get_cap_mds(ceph_inode(inode));
 370 spin_unlock(&ci->i_ceph_lock);
371 return mds;
372}
373
374/*
 375 * Called under i_ceph_lock.
376 */
377static void __insert_cap_node(struct ceph_inode_info *ci,
378 struct ceph_cap *new)
379{
380 struct rb_node **p = &ci->i_caps.rb_node;
381 struct rb_node *parent = NULL;
382 struct ceph_cap *cap = NULL;
383
384 while (*p) {
385 parent = *p;
386 cap = rb_entry(parent, struct ceph_cap, ci_node);
387 if (new->mds < cap->mds)
388 p = &(*p)->rb_left;
389 else if (new->mds > cap->mds)
390 p = &(*p)->rb_right;
391 else
392 BUG();
393 }
394
395 rb_link_node(&new->ci_node, parent, p);
396 rb_insert_color(&new->ci_node, &ci->i_caps);
397}
398
399/*
400 * (re)set cap hold timeouts, which control the delayed release
401 * of unused caps back to the MDS. Should be called on cap use.
402 */
403static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
404 struct ceph_inode_info *ci)
405{
 406 struct ceph_mount_options *ma = mdsc->fsc->mount_options;
407
408 ci->i_hold_caps_min = round_jiffies(jiffies +
409 ma->caps_wanted_delay_min * HZ);
410 ci->i_hold_caps_max = round_jiffies(jiffies +
411 ma->caps_wanted_delay_max * HZ);
412 dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
413 ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
414}
415
416/*
417 * (Re)queue cap at the end of the delayed cap release list.
418 *
419 * If I_FLUSH is set, leave the inode at the front of the list.
420 *
 421 * Caller holds i_ceph_lock
422 * -> we take mdsc->cap_delay_lock
423 */
424static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
425 struct ceph_inode_info *ci)
426{
427 __cap_set_timeouts(mdsc, ci);
428 dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
429 ci->i_ceph_flags, ci->i_hold_caps_max);
430 if (!mdsc->stopping) {
431 spin_lock(&mdsc->cap_delay_lock);
432 if (!list_empty(&ci->i_cap_delay_list)) {
433 if (ci->i_ceph_flags & CEPH_I_FLUSH)
434 goto no_change;
435 list_del_init(&ci->i_cap_delay_list);
436 }
437 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
438no_change:
439 spin_unlock(&mdsc->cap_delay_lock);
440 }
441}
442
443/*
444 * Queue an inode for immediate writeback. Mark inode with I_FLUSH,
445 * indicating we should send a cap message to flush dirty metadata
446 * asap, and move to the front of the delayed cap list.
447 */
448static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
449 struct ceph_inode_info *ci)
450{
451 dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
452 spin_lock(&mdsc->cap_delay_lock);
453 ci->i_ceph_flags |= CEPH_I_FLUSH;
454 if (!list_empty(&ci->i_cap_delay_list))
455 list_del_init(&ci->i_cap_delay_list);
456 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
457 spin_unlock(&mdsc->cap_delay_lock);
458}
459
460/*
461 * Cancel delayed work on cap.
462 *
 463 * Caller must hold i_ceph_lock.
464 */
465static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
466 struct ceph_inode_info *ci)
467{
468 dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
469 if (list_empty(&ci->i_cap_delay_list))
470 return;
471 spin_lock(&mdsc->cap_delay_lock);
472 list_del_init(&ci->i_cap_delay_list);
473 spin_unlock(&mdsc->cap_delay_lock);
474}
475
476/*
477 * Common issue checks for add_cap, handle_cap_grant.
478 */
479static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
480 unsigned issued)
481{
482 unsigned had = __ceph_caps_issued(ci, NULL);
483
484 /*
485 * Each time we receive FILE_CACHE anew, we increment
486 * i_rdcache_gen.
487 */
488 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
489 (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
490 ci->i_rdcache_gen++;
491
492 /*
 493 * if we are newly issued FILE_SHARED, clear D_COMPLETE; we
494 * don't know what happened to this directory while we didn't
495 * have the cap.
496 */
497 if ((issued & CEPH_CAP_FILE_SHARED) &&
498 (had & CEPH_CAP_FILE_SHARED) == 0) {
499 ci->i_shared_gen++;
500 if (S_ISDIR(ci->vfs_inode.i_mode))
501 ceph_dir_clear_complete(&ci->vfs_inode);
502 }
503}
504
505/*
506 * Add a capability under the given MDS session.
507 *
508 * Caller should hold session snap_rwsem (read) and s_mutex.
509 *
510 * @fmode is the open file mode, if we are opening a file, otherwise
511 * it is < 0. (This is so we can atomically add the cap and add an
512 * open file reference to it.)
513 */
514int ceph_add_cap(struct inode *inode,
515 struct ceph_mds_session *session, u64 cap_id,
516 int fmode, unsigned issued, unsigned wanted,
517 unsigned seq, unsigned mseq, u64 realmino, int flags,
518 struct ceph_cap_reservation *caps_reservation)
519{
 520 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
521 struct ceph_inode_info *ci = ceph_inode(inode);
522 struct ceph_cap *new_cap = NULL;
523 struct ceph_cap *cap;
524 int mds = session->s_mds;
525 int actual_wanted;
526
527 dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
528 session->s_mds, cap_id, ceph_cap_string(issued), seq);
529
530 /*
531 * If we are opening the file, include file mode wanted bits
532 * in wanted.
533 */
534 if (fmode >= 0)
535 wanted |= ceph_caps_for_mode(fmode);
536
537retry:
 538 spin_lock(&ci->i_ceph_lock);
539 cap = __get_cap_for_mds(ci, mds);
540 if (!cap) {
541 if (new_cap) {
542 cap = new_cap;
543 new_cap = NULL;
544 } else {
 545 spin_unlock(&ci->i_ceph_lock);
 546 new_cap = get_cap(mdsc, caps_reservation);
547 if (new_cap == NULL)
548 return -ENOMEM;
549 goto retry;
550 }
551
552 cap->issued = 0;
553 cap->implemented = 0;
554 cap->mds = mds;
555 cap->mds_wanted = 0;
556
557 cap->ci = ci;
558 __insert_cap_node(ci, cap);
559
560 /* clear out old exporting info? (i.e. on cap import) */
561 if (ci->i_cap_exporting_mds == mds) {
562 ci->i_cap_exporting_issued = 0;
563 ci->i_cap_exporting_mseq = 0;
564 ci->i_cap_exporting_mds = -1;
565 }
566
567 /* add to session cap list */
568 cap->session = session;
569 spin_lock(&session->s_cap_lock);
570 list_add_tail(&cap->session_caps, &session->s_caps);
571 session->s_nr_caps++;
572 spin_unlock(&session->s_cap_lock);
573 } else if (new_cap)
574 ceph_put_cap(mdsc, new_cap);
575
576 if (!ci->i_snap_realm) {
577 /*
578 * add this inode to the appropriate snap realm
579 */
580 struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
581 realmino);
582 if (realm) {
583 ceph_get_snap_realm(mdsc, realm);
584 spin_lock(&realm->inodes_with_caps_lock);
585 ci->i_snap_realm = realm;
586 list_add(&ci->i_snap_realm_item,
587 &realm->inodes_with_caps);
588 spin_unlock(&realm->inodes_with_caps_lock);
589 } else {
590 pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
591 realmino);
 592 WARN_ON(!realm);
593 }
594 }
595
596 __check_cap_issue(ci, cap, issued);
597
598 /*
599 * If we are issued caps we don't want, or the mds' wanted
600 * value appears to be off, queue a check so we'll release
601 * later and/or update the mds wanted value.
602 */
603 actual_wanted = __ceph_caps_wanted(ci);
604 if ((wanted & ~actual_wanted) ||
605 (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
606 dout(" issued %s, mds wanted %s, actual %s, queueing\n",
607 ceph_cap_string(issued), ceph_cap_string(wanted),
608 ceph_cap_string(actual_wanted));
609 __cap_delay_requeue(mdsc, ci);
610 }
611
612 if (flags & CEPH_CAP_FLAG_AUTH)
613 ci->i_auth_cap = cap;
 614 else if (ci->i_auth_cap == cap) {
 615 ci->i_auth_cap = NULL;
616 spin_lock(&mdsc->cap_dirty_lock);
617 if (!list_empty(&ci->i_dirty_item)) {
618 dout(" moving %p to cap_dirty_migrating\n", inode);
619 list_move(&ci->i_dirty_item,
620 &mdsc->cap_dirty_migrating);
621 }
622 spin_unlock(&mdsc->cap_dirty_lock);
623 }
624
625 dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
626 inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
627 ceph_cap_string(issued|cap->issued), seq, mds);
628 cap->cap_id = cap_id;
629 cap->issued = issued;
630 cap->implemented |= issued;
631 cap->mds_wanted |= wanted;
632 cap->seq = seq;
633 cap->issue_seq = seq;
634 cap->mseq = mseq;
 635 cap->cap_gen = session->s_cap_gen;
636
637 if (fmode >= 0)
638 __ceph_get_fmode(ci, fmode);
 639 spin_unlock(&ci->i_ceph_lock);
 640 wake_up_all(&ci->i_cap_wq);
641 return 0;
642}
643
644/*
645 * Return true if cap has not timed out and belongs to the current
646 * generation of the MDS session (i.e. has not gone 'stale' due to
647 * us losing touch with the mds).
648 */
649static int __cap_is_valid(struct ceph_cap *cap)
650{
651 unsigned long ttl;
 652 u32 gen;
 653
 654 spin_lock(&cap->session->s_gen_ttl_lock);
655 gen = cap->session->s_cap_gen;
656 ttl = cap->session->s_cap_ttl;
 657 spin_unlock(&cap->session->s_gen_ttl_lock);
 658
 659 if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
660 dout("__cap_is_valid %p cap %p issued %s "
661 "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
 662 cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
663 return 0;
664 }
665
666 return 1;
667}
668
669/*
670 * Return set of valid cap bits issued to us. Note that caps time
671 * out, and may be invalidated in bulk if the client session times out
672 * and session->s_cap_gen is bumped.
673 */
674int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
675{
 676 int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
677 struct ceph_cap *cap;
678 struct rb_node *p;
679
680 if (implemented)
681 *implemented = 0;
682 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
683 cap = rb_entry(p, struct ceph_cap, ci_node);
684 if (!__cap_is_valid(cap))
685 continue;
686 dout("__ceph_caps_issued %p cap %p issued %s\n",
687 &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
688 have |= cap->issued;
689 if (implemented)
690 *implemented |= cap->implemented;
691 }
692 return have;
693}
694
695/*
696 * Get cap bits issued by caps other than @ocap
697 */
698int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
699{
700 int have = ci->i_snap_caps;
701 struct ceph_cap *cap;
702 struct rb_node *p;
703
704 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
705 cap = rb_entry(p, struct ceph_cap, ci_node);
706 if (cap == ocap)
707 continue;
708 if (!__cap_is_valid(cap))
709 continue;
710 have |= cap->issued;
711 }
712 return have;
713}
714
715/*
716 * Move a cap to the end of the LRU (oldest caps at list head, newest
717 * at list tail).
718 */
719static void __touch_cap(struct ceph_cap *cap)
720{
721 struct ceph_mds_session *s = cap->session;
722
 723 spin_lock(&s->s_cap_lock);
 724 if (s->s_cap_iterator == NULL) {
725 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
726 s->s_mds);
727 list_move_tail(&cap->session_caps, &s->s_caps);
728 } else {
729 dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
730 &cap->ci->vfs_inode, cap, s->s_mds);
731 }
732 spin_unlock(&s->s_cap_lock);
733}
734
735/*
736 * Check if we hold the given mask. If so, move the cap(s) to the
737 * front of their respective LRUs. (This is the preferred way for
738 * callers to check for caps they want.)
739 */
740int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
741{
742 struct ceph_cap *cap;
743 struct rb_node *p;
744 int have = ci->i_snap_caps;
745
746 if ((have & mask) == mask) {
747 dout("__ceph_caps_issued_mask %p snap issued %s"
748 " (mask %s)\n", &ci->vfs_inode,
749 ceph_cap_string(have),
750 ceph_cap_string(mask));
751 return 1;
752 }
753
754 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
755 cap = rb_entry(p, struct ceph_cap, ci_node);
756 if (!__cap_is_valid(cap))
757 continue;
758 if ((cap->issued & mask) == mask) {
759 dout("__ceph_caps_issued_mask %p cap %p issued %s"
760 " (mask %s)\n", &ci->vfs_inode, cap,
761 ceph_cap_string(cap->issued),
762 ceph_cap_string(mask));
763 if (touch)
764 __touch_cap(cap);
765 return 1;
766 }
767
768 /* does a combination of caps satisfy mask? */
769 have |= cap->issued;
770 if ((have & mask) == mask) {
771 dout("__ceph_caps_issued_mask %p combo issued %s"
772 " (mask %s)\n", &ci->vfs_inode,
773 ceph_cap_string(cap->issued),
774 ceph_cap_string(mask));
775 if (touch) {
776 struct rb_node *q;
777
 778 /* touch this + preceding caps */
779 __touch_cap(cap);
780 for (q = rb_first(&ci->i_caps); q != p;
781 q = rb_next(q)) {
782 cap = rb_entry(q, struct ceph_cap,
783 ci_node);
784 if (!__cap_is_valid(cap))
785 continue;
786 __touch_cap(cap);
787 }
788 }
789 return 1;
790 }
791 }
792
793 return 0;
794}
795
796/*
797 * Return true if mask caps are currently being revoked by an MDS.
798 */
799int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
800{
801 struct inode *inode = &ci->vfs_inode;
802 struct ceph_cap *cap;
803 struct rb_node *p;
804 int ret = 0;
805
 806 spin_lock(&ci->i_ceph_lock);
807 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
808 cap = rb_entry(p, struct ceph_cap, ci_node);
809 if (__cap_is_valid(cap) &&
810 (cap->implemented & ~cap->issued & mask)) {
811 ret = 1;
812 break;
813 }
814 }
 815 spin_unlock(&ci->i_ceph_lock);
816 dout("ceph_caps_revoking %p %s = %d\n", inode,
817 ceph_cap_string(mask), ret);
818 return ret;
819}
820
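/*
 * Return the caps currently in active use, derived from the inode's
 * pin/rd/rdcache/wr/wrbuffer reference counts.
 */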
821int __ceph_caps_used(struct ceph_inode_info *ci)
822{
823 int used = 0;
824 if (ci->i_pin_ref)
825 used |= CEPH_CAP_PIN;
826 if (ci->i_rd_ref)
827 used |= CEPH_CAP_FILE_RD;
 828 if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
829 used |= CEPH_CAP_FILE_CACHE;
830 if (ci->i_wr_ref)
831 used |= CEPH_CAP_FILE_WR;
 832 if (ci->i_wb_ref || ci->i_wrbuffer_ref)
833 used |= CEPH_CAP_FILE_BUFFER;
834 return used;
835}
836
837/*
838 * wanted, by virtue of open file modes
839 */
840int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
841{
842 int want = 0;
843 int mode;
 844 for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
845 if (ci->i_nr_by_mode[mode])
846 want |= ceph_caps_for_mode(mode);
847 return want;
848}
849
850/*
851 * Return caps we have registered with the MDS(s) as 'wanted'.
852 */
853int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
854{
855 struct ceph_cap *cap;
856 struct rb_node *p;
857 int mds_wanted = 0;
858
859 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
860 cap = rb_entry(p, struct ceph_cap, ci_node);
861 if (!__cap_is_valid(cap))
862 continue;
863 mds_wanted |= cap->mds_wanted;
864 }
865 return mds_wanted;
866}
867
868/*
 869 * called under i_ceph_lock
870 */
871static int __ceph_is_any_caps(struct ceph_inode_info *ci)
872{
873 return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
874}
875
876/*
877 * Remove a cap. Take steps to deal with a racing iterate_session_caps.
878 *
 879 * caller should hold i_ceph_lock.
 880 * caller will not hold session s_mutex if called from destroy_inode.
 881 */
 882void __ceph_remove_cap(struct ceph_cap *cap)
883{
884 struct ceph_mds_session *session = cap->session;
885 struct ceph_inode_info *ci = cap->ci;
 886 struct ceph_mds_client *mdsc =
 887 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
 888 int removed = 0;
889
890 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
891
892 /* remove from session list */
893 spin_lock(&session->s_cap_lock);
894 if (session->s_cap_iterator == cap) {
895 /* not yet, we are iterating over this very cap */
896 dout("__ceph_remove_cap delaying %p removal from session %p\n",
897 cap, cap->session);
898 } else {
899 list_del_init(&cap->session_caps);
900 session->s_nr_caps--;
901 cap->session = NULL;
 902 removed = 1;
 903 }
904 /* protect backpointer with s_cap_lock: see iterate_session_caps */
905 cap->ci = NULL;
906 spin_unlock(&session->s_cap_lock);
907
908 /* remove from inode list */
909 rb_erase(&cap->ci_node, &ci->i_caps);
910 if (ci->i_auth_cap == cap)
911 ci->i_auth_cap = NULL;
912
913 if (removed)
 914 ceph_put_cap(mdsc, cap);
915
916 if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
917 struct ceph_snap_realm *realm = ci->i_snap_realm;
918 spin_lock(&realm->inodes_with_caps_lock);
919 list_del_init(&ci->i_snap_realm_item);
920 ci->i_snap_realm_counter++;
921 ci->i_snap_realm = NULL;
922 spin_unlock(&realm->inodes_with_caps_lock);
923 ceph_put_snap_realm(mdsc, realm);
924 }
925 if (!__ceph_is_any_real_caps(ci))
926 __cap_delay_cancel(mdsc, ci);
927}
928
929/*
930 * Build and send a cap message to the given MDS.
931 *
932 * Caller should be holding s_mutex.
933 */
934static int send_cap_msg(struct ceph_mds_session *session,
935 u64 ino, u64 cid, int op,
936 int caps, int wanted, int dirty,
937 u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
938 u64 size, u64 max_size,
939 struct timespec *mtime, struct timespec *atime,
940 u64 time_warp_seq,
 941 uid_t uid, gid_t gid, umode_t mode,
942 u64 xattr_version,
943 struct ceph_buffer *xattrs_buf,
944 u64 follows)
945{
946 struct ceph_mds_caps *fc;
947 struct ceph_msg *msg;
948
949 dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
950 " seq %u/%u mseq %u follows %lld size %llu/%llu"
951 " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
952 cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
953 ceph_cap_string(dirty),
954 seq, issue_seq, mseq, follows, size, max_size,
955 xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
956
 957 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS, false);
958 if (!msg)
959 return -ENOMEM;
 960
 961 msg->hdr.tid = cpu_to_le64(flush_tid);
 962
 963 fc = msg->front.iov_base;
964 memset(fc, 0, sizeof(*fc));
965
966 fc->cap_id = cpu_to_le64(cid);
967 fc->op = cpu_to_le32(op);
968 fc->seq = cpu_to_le32(seq);
969 fc->issue_seq = cpu_to_le32(issue_seq);
970 fc->migrate_seq = cpu_to_le32(mseq);
971 fc->caps = cpu_to_le32(caps);
972 fc->wanted = cpu_to_le32(wanted);
973 fc->dirty = cpu_to_le32(dirty);
974 fc->ino = cpu_to_le64(ino);
975 fc->snap_follows = cpu_to_le64(follows);
976
977 fc->size = cpu_to_le64(size);
978 fc->max_size = cpu_to_le64(max_size);
979 if (mtime)
980 ceph_encode_timespec(&fc->mtime, mtime);
981 if (atime)
982 ceph_encode_timespec(&fc->atime, atime);
983 fc->time_warp_seq = cpu_to_le32(time_warp_seq);
984
985 fc->uid = cpu_to_le32(uid);
986 fc->gid = cpu_to_le32(gid);
987 fc->mode = cpu_to_le32(mode);
988
989 fc->xattr_version = cpu_to_le64(xattr_version);
990 if (xattrs_buf) {
991 msg->middle = ceph_buffer_get(xattrs_buf);
992 fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
993 msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
994 }
995
996 ceph_con_send(&session->s_con, msg);
997 return 0;
998}
999
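/*
 * Append one cap release record to the session's current release message;
 * once it holds CEPH_CAPS_PER_RELEASE entries, move the message to the
 * ready-to-send list.
 */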
1000static void __queue_cap_release(struct ceph_mds_session *session,
1001 u64 ino, u64 cap_id, u32 migrate_seq,
1002 u32 issue_seq)
1003{
1004 struct ceph_msg *msg;
1005 struct ceph_mds_cap_release *head;
1006 struct ceph_mds_cap_item *item;
1007
1008 spin_lock(&session->s_cap_lock);
1009 BUG_ON(!session->s_num_cap_releases);
1010 msg = list_first_entry(&session->s_cap_releases,
1011 struct ceph_msg, list_head);
1012
1013 dout(" adding %llx release to mds%d msg %p (%d left)\n",
1014 ino, session->s_mds, msg, session->s_num_cap_releases);
1015
1016 BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
1017 head = msg->front.iov_base;
 1018 le32_add_cpu(&head->num, 1);
1019 item = msg->front.iov_base + msg->front.iov_len;
1020 item->ino = cpu_to_le64(ino);
1021 item->cap_id = cpu_to_le64(cap_id);
1022 item->migrate_seq = cpu_to_le32(migrate_seq);
1023 item->seq = cpu_to_le32(issue_seq);
1024
1025 session->s_num_cap_releases--;
1026
1027 msg->front.iov_len += sizeof(*item);
1028 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1029 dout(" release msg %p full\n", msg);
1030 list_move_tail(&msg->list_head, &session->s_cap_releases_done);
1031 } else {
1032 dout(" release msg %p at %d/%d (%d)\n", msg,
1033 (int)le32_to_cpu(head->num),
1034 (int)CEPH_CAPS_PER_RELEASE,
1035 (int)msg->front.iov_len);
1036 }
1037 spin_unlock(&session->s_cap_lock);
1038}
1039
 1040/*
 1041 * Queue cap releases when an inode is dropped from our cache. Since
 1042 * inode is about to be destroyed, there is no need for i_ceph_lock.
1043 */
1044void ceph_queue_caps_release(struct inode *inode)
1045{
1046 struct ceph_inode_info *ci = ceph_inode(inode);
1047 struct rb_node *p;
1048
1049 p = rb_first(&ci->i_caps);
1050 while (p) {
1051 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1052 struct ceph_mds_session *session = cap->session;
 1053
1054 __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
1055 cap->mseq, cap->issue_seq);
 1056 p = rb_next(p);
 1057 __ceph_remove_cap(cap);
 1058 }
1059}
1060
1061/*
1062 * Send a cap msg on the given inode. Update our caps state, then
 1063 * drop i_ceph_lock and send the message.
1064 *
1065 * Make note of max_size reported/requested from mds, revoked caps
1066 * that have now been implemented.
1067 *
 1068 * Make a half-hearted attempt to invalidate the page cache if we are
1069 * dropping RDCACHE. Note that this will leave behind locked pages
1070 * that we'll then need to deal with elsewhere.
1071 *
1072 * Return non-zero if delayed release, or we experienced an error
1073 * such that the caller should requeue + retry later.
1074 *
 1075 * called with i_ceph_lock, then drops it.
1076 * caller should hold snap_rwsem (read), s_mutex.
1077 */
1078static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1079 int op, int used, int want, int retain, int flushing,
1080 unsigned *pflush_tid)
 1081 __releases(cap->ci->i_ceph_lock)
1082{
1083 struct ceph_inode_info *ci = cap->ci;
1084 struct inode *inode = &ci->vfs_inode;
1085 u64 cap_id = cap->cap_id;
 1086 int held, revoking, dropping, keep;
1087 u64 seq, issue_seq, mseq, time_warp_seq, follows;
1088 u64 size, max_size;
1089 struct timespec mtime, atime;
1090 int wake = 0;
 1091 umode_t mode;
1092 uid_t uid;
1093 gid_t gid;
1094 struct ceph_mds_session *session;
1095 u64 xattr_version = 0;
 1096 struct ceph_buffer *xattr_blob = NULL;
1097 int delayed = 0;
1098 u64 flush_tid = 0;
1099 int i;
1100 int ret;
1101
1102 held = cap->issued | cap->implemented;
1103 revoking = cap->implemented & ~cap->issued;
1104 retain &= ~revoking;
1105 dropping = cap->issued & ~retain;
1106
1107 dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1108 inode, cap, cap->session,
1109 ceph_cap_string(held), ceph_cap_string(held & retain),
1110 ceph_cap_string(revoking));
1111 BUG_ON((retain & CEPH_CAP_PIN) == 0);
1112
1113 session = cap->session;
1114
1115 /* don't release wanted unless we've waited a bit. */
1116 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1117 time_before(jiffies, ci->i_hold_caps_min)) {
1118 dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1119 ceph_cap_string(cap->issued),
1120 ceph_cap_string(cap->issued & retain),
1121 ceph_cap_string(cap->mds_wanted),
1122 ceph_cap_string(want));
1123 want |= cap->mds_wanted;
1124 retain |= cap->issued;
1125 delayed = 1;
1126 }
1127 ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1128
1129 cap->issued &= retain; /* drop bits we don't want */
1130 if (cap->implemented & ~cap->issued) {
1131 /*
1132 * Wake up any waiters on wanted -> needed transition.
1133 * This is due to the weird transition from buffered
1134 * to sync IO... we need to flush dirty pages _before_
1135 * allowing sync writes to avoid reordering.
1136 */
1137 wake = 1;
1138 }
1139 cap->implemented &= cap->issued | used;
1140 cap->mds_wanted = want;
1141
1142 if (flushing) {
1143 /*
1144 * assign a tid for flush operations so we can avoid
1145 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
1146 * clean type races. track latest tid for every bit
1147 * so we can handle flush AxFw, flush Fw, and have the
1148 * first ack clean Ax.
1149 */
1150 flush_tid = ++ci->i_cap_flush_last_tid;
1151 if (pflush_tid)
1152 *pflush_tid = flush_tid;
1153 dout(" cap_flush_tid %d\n", (int)flush_tid);
1154 for (i = 0; i < CEPH_CAP_BITS; i++)
1155 if (flushing & (1 << i))
1156 ci->i_cap_flush_tid[i] = flush_tid;
1157
1158 follows = ci->i_head_snapc->seq;
1159 } else {
1160 follows = 0;
1161 }
1162
1163 keep = cap->implemented;
1164 seq = cap->seq;
1165 issue_seq = cap->issue_seq;
1166 mseq = cap->mseq;
1167 size = inode->i_size;
1168 ci->i_reported_size = size;
1169 max_size = ci->i_wanted_max_size;
1170 ci->i_requested_max_size = max_size;
1171 mtime = inode->i_mtime;
1172 atime = inode->i_atime;
1173 time_warp_seq = ci->i_time_warp_seq;
1174 uid = inode->i_uid;
1175 gid = inode->i_gid;
1176 mode = inode->i_mode;
1177
 1178 if (flushing & CEPH_CAP_XATTR_EXCL) {
 1179 __ceph_build_xattrs_blob(ci);
1180 xattr_blob = ci->i_xattrs.blob;
1181 xattr_version = ci->i_xattrs.version;
1182 }
1183
 1184 spin_unlock(&ci->i_ceph_lock);
 1185
1186 ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1187 op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
1188 size, max_size, &mtime, &atime, time_warp_seq,
 1189 uid, gid, mode, xattr_version, xattr_blob,
1190 follows);
1191 if (ret < 0) {
1192 dout("error sending cap msg, must requeue %p\n", inode);
1193 delayed = 1;
1194 }
1195
1196 if (wake)
 1197 wake_up_all(&ci->i_cap_wq);
1198
1199 return delayed;
1200}
1201
1202/*
1203 * When a snapshot is taken, clients accumulate dirty metadata on
1204 * inodes with capabilities in ceph_cap_snaps to describe the file
1205 * state at the time the snapshot was taken. This must be flushed
1206 * asynchronously back to the MDS once sync writes complete and dirty
1207 * data is written out.
1208 *
1209 * Unless @again is true, skip cap_snaps that were already sent to
1210 * the MDS (i.e., during this session).
1211 *
 1212 * Called under i_ceph_lock. Takes s_mutex as needed.
1213 */
1214void __ceph_flush_snaps(struct ceph_inode_info *ci,
1215 struct ceph_mds_session **psession,
1216 int again)
1217 __releases(ci->i_ceph_lock)
1218 __acquires(ci->i_ceph_lock)
1219{
1220 struct inode *inode = &ci->vfs_inode;
1221 int mds;
1222 struct ceph_cap_snap *capsnap;
1223 u32 mseq;
 1224 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1225 struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1226 session->s_mutex */
1227 u64 next_follows = 0; /* keep track of how far we've gotten through the
1228 i_cap_snaps list, and skip these entries next time
1229 around to avoid an infinite loop */
1230
1231 if (psession)
1232 session = *psession;
1233
1234 dout("__flush_snaps %p\n", inode);
1235retry:
1236 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
 1237 /* avoid an infinite loop after retry */
1238 if (capsnap->follows < next_follows)
1239 continue;
1240 /*
1241 * we need to wait for sync writes to complete and for dirty
1242 * pages to be written out.
1243 */
1244 if (capsnap->dirty_pages || capsnap->writing)
 1245 break;
 1246
1247 /*
1248 * if cap writeback already occurred, we should have dropped
1249 * the capsnap in ceph_put_wrbuffer_cap_refs.
1250 */
1251 BUG_ON(capsnap->dirty == 0);
1252
 1253 /* pick mds, take s_mutex */
1254 if (ci->i_auth_cap == NULL) {
1255 dout("no auth cap (migrating?), doing nothing\n");
1256 goto out;
1257 }
1258
1259 /* only flush each capsnap once */
1260 if (!again && !list_empty(&capsnap->flushing_item)) {
1261 dout("already flushed %p, skipping\n", capsnap);
1262 continue;
1263 }
1264
1265 mds = ci->i_auth_cap->session->s_mds;
1266 mseq = ci->i_auth_cap->mseq;
1267
1268 if (session && session->s_mds != mds) {
1269 dout("oops, wrong session %p mutex\n", session);
1270 mutex_unlock(&session->s_mutex);
1271 ceph_put_mds_session(session);
1272 session = NULL;
1273 }
1274 if (!session) {
 1275 spin_unlock(&ci->i_ceph_lock);
1276 mutex_lock(&mdsc->mutex);
1277 session = __ceph_lookup_mds_session(mdsc, mds);
1278 mutex_unlock(&mdsc->mutex);
1279 if (session) {
1280 dout("inverting session/ino locks on %p\n",
1281 session);
1282 mutex_lock(&session->s_mutex);
1283 }
1284 /*
1285 * if session == NULL, we raced against a cap
1286 * deletion or migration. retry, and we'll
1287 * get a better @mds value next time.
 1288 */
 1289 spin_lock(&ci->i_ceph_lock);
1290 goto retry;
1291 }
1292
1293 capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
1294 atomic_inc(&capsnap->nref);
1295 if (!list_empty(&capsnap->flushing_item))
1296 list_del_init(&capsnap->flushing_item);
1297 list_add_tail(&capsnap->flushing_item,
1298 &session->s_cap_snaps_flushing);
 1299 spin_unlock(&ci->i_ceph_lock);
 1300
1301 dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1302 inode, capsnap, capsnap->follows, capsnap->flush_tid);
1303 send_cap_msg(session, ceph_vino(inode).ino, 0,
1304 CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1305 capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
1306 capsnap->size, 0,
1307 &capsnap->mtime, &capsnap->atime,
1308 capsnap->time_warp_seq,
1309 capsnap->uid, capsnap->gid, capsnap->mode,
 1310 capsnap->xattr_version, capsnap->xattr_blob,
1311 capsnap->follows);
1312
1313 next_follows = capsnap->follows + 1;
1314 ceph_put_cap_snap(capsnap);
1315
 1316 spin_lock(&ci->i_ceph_lock);
1317 goto retry;
1318 }
1319
1320 /* we flushed them all; remove this inode from the queue */
1321 spin_lock(&mdsc->snap_flush_lock);
1322 list_del_init(&ci->i_snap_flush_item);
1323 spin_unlock(&mdsc->snap_flush_lock);
1324
 1325out:
1326 if (psession)
1327 *psession = session;
1328 else if (session) {
1329 mutex_unlock(&session->s_mutex);
1330 ceph_put_mds_session(session);
1331 }
1332}
1333
1334static void ceph_flush_snaps(struct ceph_inode_info *ci)
1335{
 1336 spin_lock(&ci->i_ceph_lock);
 1337 __ceph_flush_snaps(ci, NULL, 0);
 1338 spin_unlock(&ci->i_ceph_lock);
1339}
1340
 1341/*
1342 * Mark caps dirty. If inode is newly dirty, return the dirty flags.
1343 * Caller is then responsible for calling __mark_inode_dirty with the
1344 * returned flags value.
 1345 */
 1346int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 1347{
 1348 struct ceph_mds_client *mdsc =
 1349 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1350 struct inode *inode = &ci->vfs_inode;
1351 int was = ci->i_dirty_caps;
1352 int dirty = 0;
1353
1354 dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1355 ceph_cap_string(mask), ceph_cap_string(was),
1356 ceph_cap_string(was | mask));
1357 ci->i_dirty_caps |= mask;
1358 if (was == 0) {
1359 if (!ci->i_head_snapc)
1360 ci->i_head_snapc = ceph_get_snap_context(
1361 ci->i_snap_realm->cached_context);
1362 dout(" inode %p now dirty snapc %p auth cap %p\n",
1363 &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
1364 BUG_ON(!list_empty(&ci->i_dirty_item));
1365 spin_lock(&mdsc->cap_dirty_lock);
1366 if (ci->i_auth_cap)
1367 list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1368 else
1369 list_add(&ci->i_dirty_item,
1370 &mdsc->cap_dirty_migrating);
1371 spin_unlock(&mdsc->cap_dirty_lock);
1372 if (ci->i_flushing_caps == 0) {
 1373 ihold(inode);
1374 dirty |= I_DIRTY_SYNC;
1375 }
1376 }
1377 BUG_ON(list_empty(&ci->i_dirty_item));
1378 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1379 (mask & CEPH_CAP_FILE_BUFFER))
1380 dirty |= I_DIRTY_DATASYNC;
 1381 __cap_delay_requeue(mdsc, ci);
 1382 return dirty;
1383}
1384
1385/*
 1386 * Add dirty inode to the flushing list. Assign a seq number so we
1387 * can wait for caps to flush without starving.
 1388 *
 1389 * Called under i_ceph_lock.
 1390 */
 1391static int __mark_caps_flushing(struct inode *inode,
1392 struct ceph_mds_session *session)
1393{
 1394 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 1395 struct ceph_inode_info *ci = ceph_inode(inode);
 1396 int flushing;
 1397
 1398 BUG_ON(ci->i_dirty_caps == 0);
 1399 BUG_ON(list_empty(&ci->i_dirty_item));
1400
1401 flushing = ci->i_dirty_caps;
1402 dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1403 ceph_cap_string(flushing),
1404 ceph_cap_string(ci->i_flushing_caps),
1405 ceph_cap_string(ci->i_flushing_caps | flushing));
1406 ci->i_flushing_caps |= flushing;
1407 ci->i_dirty_caps = 0;
 1408 dout(" inode %p now !dirty\n", inode);
 1409
 1410 spin_lock(&mdsc->cap_dirty_lock);
1411 list_del_init(&ci->i_dirty_item);
1412
1413 ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
1414 if (list_empty(&ci->i_flushing_item)) {
1415 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1416 mdsc->num_cap_flushing++;
1417 dout(" inode %p now flushing seq %lld\n", inode,
1418 ci->i_cap_flush_seq);
1419 } else {
1420 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1421 dout(" inode %p now flushing (more) seq %lld\n", inode,
1422 ci->i_cap_flush_seq);
1423 }
1424 spin_unlock(&mdsc->cap_dirty_lock);
1425
1426 return flushing;
1427}
1428
1429/*
1430 * try to invalidate mapping pages without blocking.
1431 */
1432static int try_nonblocking_invalidate(struct inode *inode)
1433{
1434 struct ceph_inode_info *ci = ceph_inode(inode);
1435 u32 invalidating_gen = ci->i_rdcache_gen;
1436
 1437 spin_unlock(&ci->i_ceph_lock);
 1438 invalidate_mapping_pages(&inode->i_data, 0, -1);
 1439 spin_lock(&ci->i_ceph_lock);
 1440
 1441 if (inode->i_data.nrpages == 0 &&
1442 invalidating_gen == ci->i_rdcache_gen) {
1443 /* success. */
1444 dout("try_nonblocking_invalidate %p success\n", inode);
1445 /* save any racing async invalidate some trouble */
1446 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1447 return 0;
1448 }
1449 dout("try_nonblocking_invalidate %p failed\n", inode);
1450 return -1;
1451}
1452
1453/*
1454 * Swiss army knife function to examine currently used and wanted
1455 * versus held caps. Release, flush, ack revoked caps to mds as
1456 * appropriate.
1457 *
1458 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1459 * cap release further.
1460 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
1461 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1462 * further delay.
1463 */
1464void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1465 struct ceph_mds_session *session)
1466{
1467 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1468 struct ceph_mds_client *mdsc = fsc->mdsc;
1469 struct inode *inode = &ci->vfs_inode;
1470 struct ceph_cap *cap;
1471 int file_wanted, used;
1472 int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
 1473 int issued, implemented, want, retain, revoking, flushing = 0;
1474 int mds = -1; /* keep track of how far we've gone through i_caps list
1475 to avoid an infinite loop on retry */
1476 struct rb_node *p;
1477 int tried_invalidate = 0;
1478 int delayed = 0, sent = 0, force_requeue = 0, num;
 1479 int queue_invalidate = 0;
1480 int is_delayed = flags & CHECK_CAPS_NODELAY;
1481
1482 /* if we are unmounting, flush any unused caps immediately. */
1483 if (mdsc->stopping)
1484 is_delayed = 1;
1485
 1486 spin_lock(&ci->i_ceph_lock);
1487
1488 if (ci->i_ceph_flags & CEPH_I_FLUSH)
1489 flags |= CHECK_CAPS_FLUSH;
1490
1491 /* flush snaps first time around only */
1492 if (!list_empty(&ci->i_cap_snaps))
 1493 __ceph_flush_snaps(ci, &session, 0);
1494 goto retry_locked;
1495retry:
 1496 spin_lock(&ci->i_ceph_lock);
1497retry_locked:
1498 file_wanted = __ceph_caps_file_wanted(ci);
1499 used = __ceph_caps_used(ci);
1500 want = file_wanted | used;
1501 issued = __ceph_caps_issued(ci, &implemented);
1502 revoking = implemented & ~issued;
1503
1504 retain = want | CEPH_CAP_PIN;
1505 if (!mdsc->stopping && inode->i_nlink > 0) {
1506 if (want) {
1507 retain |= CEPH_CAP_ANY; /* be greedy */
1508 } else {
1509 retain |= CEPH_CAP_ANY_SHARED;
1510 /*
1511 * keep RD only if we didn't have the file open RW,
1512 * because then the mds would revoke it anyway to
1513 * journal max_size=0.
1514 */
1515 if (ci->i_max_size == 0)
1516 retain |= CEPH_CAP_ANY_RD;
1517 }
1518 }
1519
1520 dout("check_caps %p file_want %s used %s dirty %s flushing %s"
 1521 " issued %s revoking %s retain %s %s%s%s\n", inode,
1522 ceph_cap_string(file_wanted),
1523 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1524 ceph_cap_string(ci->i_flushing_caps),
 1525 ceph_cap_string(issued), ceph_cap_string(revoking),
1526 ceph_cap_string(retain),
1527 (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1528 (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1529 (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1530
1531 /*
 1532 * If we no longer need to hold onto our old caps, and we may
1533 * have cached pages, but don't want them, then try to invalidate.
1534 * If we fail, it's because pages are locked.... try again later.
1535 */
1536 if ((!is_delayed || mdsc->stopping) &&
1537 ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
 1538 inode->i_data.nrpages && /* have cached pages */
 1539 (file_wanted == 0 || /* no open files */
1540 (revoking & (CEPH_CAP_FILE_CACHE|
1541 CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
 1542 !tried_invalidate) {
 1543 dout("check_caps trying to invalidate on %p\n", inode);
 1544 if (try_nonblocking_invalidate(inode) < 0) {
1545 if (revoking & (CEPH_CAP_FILE_CACHE|
1546 CEPH_CAP_FILE_LAZYIO)) {
1547 dout("check_caps queuing invalidate\n");
1548 queue_invalidate = 1;
1549 ci->i_rdcache_revoking = ci->i_rdcache_gen;
1550 } else {
1551 dout("check_caps failed to invalidate pages\n");
1552 /* we failed to invalidate pages. check these
1553 caps again later. */
1554 force_requeue = 1;
1555 __cap_set_timeouts(mdsc, ci);
1556 }
1557 }
1558 tried_invalidate = 1;
1559 goto retry_locked;
1560 }
1561
1562 num = 0;
1563 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1564 cap = rb_entry(p, struct ceph_cap, ci_node);
1565 num++;
1566
1567 /* avoid looping forever */
1568 if (mds >= cap->mds ||
1569 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1570 continue;
1571
1572 /* NOTE: no side-effects allowed, until we take s_mutex */
1573
1574 revoking = cap->implemented & ~cap->issued;
1575 dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
1576 cap->mds, cap, ceph_cap_string(cap->issued),
1577 ceph_cap_string(cap->implemented),
1578 ceph_cap_string(revoking));
1579
1580 if (cap == ci->i_auth_cap &&
1581 (cap->issued & CEPH_CAP_FILE_WR)) {
1582 /* request larger max_size from MDS? */
1583 if (ci->i_wanted_max_size > ci->i_max_size &&
1584 ci->i_wanted_max_size > ci->i_requested_max_size) {
1585 dout("requesting new max_size\n");
1586 goto ack;
1587 }
1588
1589 /* approaching file_max? */
1590 if ((inode->i_size << 1) >= ci->i_max_size &&
1591 (ci->i_reported_size << 1) < ci->i_max_size) {
1592 dout("i_size approaching max_size\n");
1593 goto ack;
1594 }
1595 }
1596 /* flush anything dirty? */
1597 if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1598 ci->i_dirty_caps) {
1599 dout("flushing dirty caps\n");
1600 goto ack;
1601 }
1602
1603 /* completed revocation? going down and there are no caps? */
1604 if (revoking && (revoking & used) == 0) {
1605 dout("completed revocation of %s\n",
1606 ceph_cap_string(cap->implemented & ~cap->issued));
1607 goto ack;
1608 }
1609
1610 /* want more caps from mds? */
1611 if (want & ~(cap->mds_wanted | cap->issued))
1612 goto ack;
1613
1614 /* things we might delay */
1615 if ((cap->issued & ~retain) == 0 &&
1616 cap->mds_wanted == want)
1617 continue; /* nope, all good */
1618
1619 if (is_delayed)
1620 goto ack;
1621
1622 /* delay? */
1623 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1624 time_before(jiffies, ci->i_hold_caps_max)) {
1625 dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1626 ceph_cap_string(cap->issued),
1627 ceph_cap_string(cap->issued & retain),
1628 ceph_cap_string(cap->mds_wanted),
1629 ceph_cap_string(want));
1630 delayed++;
1631 continue;
1632 }
1633
1634ack:
1635 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1636 dout(" skipping %p I_NOFLUSH set\n", inode);
1637 continue;
1638 }
1639
1640 if (session && session != cap->session) {
1641 dout("oops, wrong session %p mutex\n", session);
1642 mutex_unlock(&session->s_mutex);
1643 session = NULL;
1644 }
1645 if (!session) {
1646 session = cap->session;
1647 if (mutex_trylock(&session->s_mutex) == 0) {
1648 dout("inverting session/ino locks on %p\n",
1649 session);
 1650 spin_unlock(&ci->i_ceph_lock);
1651 if (took_snap_rwsem) {
1652 up_read(&mdsc->snap_rwsem);
1653 took_snap_rwsem = 0;
1654 }
1655 mutex_lock(&session->s_mutex);
1656 goto retry;
1657 }
1658 }
1659 /* take snap_rwsem after session mutex */
1660 if (!took_snap_rwsem) {
1661 if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1662 dout("inverting snap/in locks on %p\n",
1663 inode);
 1664 spin_unlock(&ci->i_ceph_lock);
1665 down_read(&mdsc->snap_rwsem);
1666 took_snap_rwsem = 1;
1667 goto retry;
1668 }
1669 took_snap_rwsem = 1;
1670 }
1671
1672 if (cap == ci->i_auth_cap && ci->i_dirty_caps)
1673 flushing = __mark_caps_flushing(inode, session);
1674 else
1675 flushing = 0;
1676
1677 mds = cap->mds; /* remember mds, so we don't repeat */
1678 sent++;
1679
 1680 /* __send_cap drops i_ceph_lock */
1681 delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
1682 retain, flushing, NULL);
 1683 goto retry; /* retake i_ceph_lock and restart our cap scan. */
1684 }
1685
1686 /*
1687 * Reschedule delayed caps release if we delayed anything,
1688 * otherwise cancel.
1689 */
1690 if (delayed && is_delayed)
1691 force_requeue = 1; /* __send_cap delayed release; requeue */
1692 if (!delayed && !is_delayed)
1693 __cap_delay_cancel(mdsc, ci);
1694 else if (!is_delayed || force_requeue)
1695 __cap_delay_requeue(mdsc, ci);
1696
 1697 spin_unlock(&ci->i_ceph_lock);
 1698
 1699 if (queue_invalidate)
 1700 ceph_queue_invalidate(inode);
 1701
 1702 if (session)
1703 mutex_unlock(&session->s_mutex);
1704 if (took_snap_rwsem)
1705 up_read(&mdsc->snap_rwsem);
1706}
1707
1708/*
1709 * Try to flush dirty caps back to the auth mds.
1710 */
1711static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1712 unsigned *flush_tid)
1713{
 1714 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1715 struct ceph_inode_info *ci = ceph_inode(inode);
1716 int unlock_session = session ? 0 : 1;
1717 int flushing = 0;
1718
1719retry:
 1720 spin_lock(&ci->i_ceph_lock);
1721 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1722 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1723 goto out;
1724 }
1725 if (ci->i_dirty_caps && ci->i_auth_cap) {
1726 struct ceph_cap *cap = ci->i_auth_cap;
1727 int used = __ceph_caps_used(ci);
1728 int want = __ceph_caps_wanted(ci);
1729 int delayed;
1730
1731 if (!session) {
 1732 spin_unlock(&ci->i_ceph_lock);
1733 session = cap->session;
1734 mutex_lock(&session->s_mutex);
1735 goto retry;
1736 }
1737 BUG_ON(session != cap->session);
1738 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1739 goto out;
1740
 1741 flushing = __mark_caps_flushing(inode, session);
 1742
 1743 /* __send_cap drops i_ceph_lock */
1744 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1745 cap->issued | cap->implemented, flushing,
1746 flush_tid);
1747 if (!delayed)
1748 goto out_unlocked;
1749
be655596 1750 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1751 __cap_delay_requeue(mdsc, ci);
1752 }
1753out:
be655596 1754 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1755out_unlocked:
1756 if (session && unlock_session)
1757 mutex_unlock(&session->s_mutex);
1758 return flushing;
1759}
1760
1761/*
1762 * Return true if we've flushed caps through the given flush_tid.
1763 */
1764static int caps_are_flushed(struct inode *inode, unsigned tid)
1765{
1766 struct ceph_inode_info *ci = ceph_inode(inode);
a5ee751c 1767 int i, ret = 1;
a8599bd8 1768
be655596 1769 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1770 for (i = 0; i < CEPH_CAP_BITS; i++)
1771 if ((ci->i_flushing_caps & (1 << i)) &&
1772 ci->i_cap_flush_tid[i] <= tid) {
1773 /* still flushing this bit */
1774 ret = 0;
1775 break;
1776 }
be655596 1777 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1778 return ret;
1779}
1780
1781/*
1782 * Wait on any unsafe replies for the given inode. First wait on the
1783 * newest request, and make that the upper bound. Then, if there are
1784 * more requests, keep waiting on the oldest as long as it is still older
1785 * than the original request.
1786 */
1787static void sync_write_wait(struct inode *inode)
1788{
1789 struct ceph_inode_info *ci = ceph_inode(inode);
1790 struct list_head *head = &ci->i_unsafe_writes;
1791 struct ceph_osd_request *req;
1792 u64 last_tid;
1793
1794 spin_lock(&ci->i_unsafe_lock);
1795 if (list_empty(head))
1796 goto out;
1797
1798 /* set upper bound as _last_ entry in chain */
1799 req = list_entry(head->prev, struct ceph_osd_request,
1800 r_unsafe_item);
1801 last_tid = req->r_tid;
1802
1803 do {
1804 ceph_osdc_get_request(req);
1805 spin_unlock(&ci->i_unsafe_lock);
1806 dout("sync_write_wait on tid %llu (until %llu)\n",
1807 req->r_tid, last_tid);
1808 wait_for_completion(&req->r_safe_completion);
1809 spin_lock(&ci->i_unsafe_lock);
1810 ceph_osdc_put_request(req);
1811
1812 /*
1813 * from here on look at first entry in chain, since we
1814 * only want to wait for anything older than last_tid
1815 */
1816 if (list_empty(head))
1817 break;
1818 req = list_entry(head->next, struct ceph_osd_request,
1819 r_unsafe_item);
1820 } while (req->r_tid < last_tid);
1821out:
1822 spin_unlock(&ci->i_unsafe_lock);
1823}
1824
02c24a82 1825int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
a8599bd8 1826{
7ea80859 1827 struct inode *inode = file->f_mapping->host;
a8599bd8
SW
1828 struct ceph_inode_info *ci = ceph_inode(inode);
1829 unsigned flush_tid;
1830 int ret;
1831 int dirty;
1832
1833 dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
1834 sync_write_wait(inode);
1835
02c24a82 1836 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
a8599bd8
SW
1837 if (ret < 0)
1838 return ret;
02c24a82 1839 mutex_lock(&inode->i_mutex);
a8599bd8
SW
1840
1841 dirty = try_flush_caps(inode, NULL, &flush_tid);
1842 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
1843
1844 /*
1845 * only wait on non-file metadata writeback (the mds
1846 * can recover size and mtime, so we don't need to
1847 * wait for that)
1848 */
1849 if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
1850 dout("fsync waiting for flush_tid %u\n", flush_tid);
1851 ret = wait_event_interruptible(ci->i_cap_wq,
1852 caps_are_flushed(inode, flush_tid));
1853 }
1854
1855 dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
02c24a82 1856 mutex_unlock(&inode->i_mutex);
a8599bd8
SW
1857 return ret;
1858}
1859
1860/*
1861 * Flush any dirty caps back to the mds. If we aren't asked to wait,
1862 * queue inode for flush but don't do so immediately, because we can
1863 * get by with fewer MDS messages if we wait for data writeback to
1864 * complete first.
1865 */
f1a3d572 1866int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
a8599bd8
SW
1867{
1868 struct ceph_inode_info *ci = ceph_inode(inode);
1869 unsigned flush_tid;
1870 int err = 0;
1871 int dirty;
f1a3d572 1872 int wait = wbc->sync_mode == WB_SYNC_ALL;
a8599bd8
SW
1873
1874 dout("write_inode %p wait=%d\n", inode, wait);
1875 if (wait) {
1876 dirty = try_flush_caps(inode, NULL, &flush_tid);
1877 if (dirty)
1878 err = wait_event_interruptible(ci->i_cap_wq,
1879 caps_are_flushed(inode, flush_tid));
1880 } else {
640ef79d 1881 struct ceph_mds_client *mdsc =
3d14c5d2 1882 ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8 1883
be655596 1884 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1885 if (__ceph_caps_dirty(ci))
1886 __cap_delay_requeue_front(mdsc, ci);
be655596 1887 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1888 }
1889 return err;
1890}
1891
1892/*
1893 * After a recovering MDS goes active, we need to resend any caps
1894 * we were flushing.
1895 *
1896 * Caller holds session->s_mutex.
1897 */
1898static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
1899 struct ceph_mds_session *session)
1900{
1901 struct ceph_cap_snap *capsnap;
1902
1903 dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
1904 list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
1905 flushing_item) {
1906 struct ceph_inode_info *ci = capsnap->ci;
1907 struct inode *inode = &ci->vfs_inode;
1908 struct ceph_cap *cap;
1909
be655596 1910 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1911 cap = ci->i_auth_cap;
1912 if (cap && cap->session == session) {
1913 dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
1914 cap, capsnap);
e835124c 1915 __ceph_flush_snaps(ci, &session, 1);
a8599bd8
SW
1916 } else {
1917 pr_err("%p auth cap %p not mds%d ???\n", inode,
1918 cap, session->s_mds);
a8599bd8 1919 }
be655596 1920 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1921 }
1922}
1923
1924void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
1925 struct ceph_mds_session *session)
1926{
1927 struct ceph_inode_info *ci;
1928
1929 kick_flushing_capsnaps(mdsc, session);
1930
1931 dout("kick_flushing_caps mds%d\n", session->s_mds);
1932 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
1933 struct inode *inode = &ci->vfs_inode;
1934 struct ceph_cap *cap;
1935 int delayed = 0;
1936
be655596 1937 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
1938 cap = ci->i_auth_cap;
1939 if (cap && cap->session == session) {
1940 dout("kick_flushing_caps %p cap %p %s\n", inode,
1941 cap, ceph_cap_string(ci->i_flushing_caps));
1942 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1943 __ceph_caps_used(ci),
1944 __ceph_caps_wanted(ci),
1945 cap->issued | cap->implemented,
1946 ci->i_flushing_caps, NULL);
1947 if (delayed) {
be655596 1948 spin_lock(&ci->i_ceph_lock);
a8599bd8 1949 __cap_delay_requeue(mdsc, ci);
be655596 1950 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1951 }
1952 } else {
1953 pr_err("%p auth cap %p not mds%d ???\n", inode,
1954 cap, session->s_mds);
be655596 1955 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
1956 }
1957 }
1958}
1959
088b3f5e
SW
1960static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
1961 struct ceph_mds_session *session,
1962 struct inode *inode)
1963{
1964 struct ceph_inode_info *ci = ceph_inode(inode);
1965 struct ceph_cap *cap;
1966 int delayed = 0;
1967
be655596 1968 spin_lock(&ci->i_ceph_lock);
088b3f5e
SW
1969 cap = ci->i_auth_cap;
1970 dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
1971 ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
1972 __ceph_flush_snaps(ci, &session, 1);
1973 if (ci->i_flushing_caps) {
1974 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
1975 __ceph_caps_used(ci),
1976 __ceph_caps_wanted(ci),
1977 cap->issued | cap->implemented,
1978 ci->i_flushing_caps, NULL);
1979 if (delayed) {
be655596 1980 spin_lock(&ci->i_ceph_lock);
088b3f5e 1981 __cap_delay_requeue(mdsc, ci);
be655596 1982 spin_unlock(&ci->i_ceph_lock);
088b3f5e
SW
1983 }
1984 } else {
be655596 1985 spin_unlock(&ci->i_ceph_lock);
088b3f5e
SW
1986 }
1987}
1988
a8599bd8
SW
1989
1990/*
1991 * Take references to capabilities we hold, so that we don't release
1992 * them to the MDS prematurely.
1993 *
be655596 1994 * Protected by i_ceph_lock.
a8599bd8
SW
1995 */
1996static void __take_cap_refs(struct ceph_inode_info *ci, int got)
1997{
1998 if (got & CEPH_CAP_PIN)
1999 ci->i_pin_ref++;
2000 if (got & CEPH_CAP_FILE_RD)
2001 ci->i_rd_ref++;
2002 if (got & CEPH_CAP_FILE_CACHE)
2003 ci->i_rdcache_ref++;
2004 if (got & CEPH_CAP_FILE_WR)
2005 ci->i_wr_ref++;
2006 if (got & CEPH_CAP_FILE_BUFFER) {
d3d0720d 2007 if (ci->i_wb_ref == 0)
3772d26d 2008 ihold(&ci->vfs_inode);
d3d0720d
HC
2009 ci->i_wb_ref++;
2010 dout("__take_cap_refs %p wb %d -> %d (?)\n",
2011 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
a8599bd8
SW
2012 }
2013}
2014
2015/*
2016 * Try to grab cap references. Specify those refs we @want, and the
2017 * minimal set we @need. Also include the larger offset we are writing
2018 * to (when applicable), and check against max_size here as well.
2019 * Note that caller is responsible for ensuring max_size increases are
2020 * requested from the MDS.
2021 */
2022static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2023 int *got, loff_t endoff, int *check_max, int *err)
2024{
2025 struct inode *inode = &ci->vfs_inode;
2026 int ret = 0;
2027 int have, implemented;
195d3ce2 2028 int file_wanted;
a8599bd8
SW
2029
2030 dout("get_cap_refs %p need %s want %s\n", inode,
2031 ceph_cap_string(need), ceph_cap_string(want));
be655596 2032 spin_lock(&ci->i_ceph_lock);
a8599bd8 2033
195d3ce2
SW
2034 /* make sure file is actually open */
2035 file_wanted = __ceph_caps_file_wanted(ci);
2036 if ((file_wanted & need) == 0) {
2037 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2038 ceph_cap_string(need), ceph_cap_string(file_wanted));
a8599bd8
SW
2039 *err = -EBADF;
2040 ret = 1;
2041 goto out;
2042 }
2043
2044 if (need & CEPH_CAP_FILE_WR) {
2045 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2046 dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2047 inode, endoff, ci->i_max_size);
2048 if (endoff > ci->i_wanted_max_size) {
2049 *check_max = 1;
2050 ret = 1;
2051 }
2052 goto out;
2053 }
2054 /*
2055 * If a sync write is in progress, we must wait, so that we
2056 * can get a final snapshot value for size+mtime.
2057 */
2058 if (__ceph_have_pending_cap_snap(ci)) {
2059 dout("get_cap_refs %p cap_snap_pending\n", inode);
2060 goto out;
2061 }
2062 }
2063 have = __ceph_caps_issued(ci, &implemented);
2064
2065 /*
2066 * disallow writes while a truncate is pending
2067 */
2068 if (ci->i_truncate_pending)
2069 have &= ~CEPH_CAP_FILE_WR;
2070
2071 if ((have & need) == need) {
2072 /*
2073 * Look at (implemented & ~have & not) so that we keep waiting
2074 * on transition from wanted -> needed caps. This is needed
2075 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2076 * going before a prior buffered writeback happens.
2077 */
2078 int not = want & ~(have & need);
2079 int revoking = implemented & ~have;
2080 dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2081 inode, ceph_cap_string(have), ceph_cap_string(not),
2082 ceph_cap_string(revoking));
2083 if ((revoking & not) == 0) {
2084 *got = need | (have & want);
2085 __take_cap_refs(ci, *got);
2086 ret = 1;
2087 }
2088 } else {
2089 dout("get_cap_refs %p have %s needed %s\n", inode,
2090 ceph_cap_string(have), ceph_cap_string(need));
2091 }
2092out:
be655596 2093 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2094 dout("get_cap_refs %p ret %d got %s\n", inode,
2095 ret, ceph_cap_string(*got));
2096 return ret;
2097}
2098
2099/*
2100 * Check the offset we are writing up to against our current
2101 * max_size. If necessary, tell the MDS we want to write to
2102 * a larger offset.
2103 */
2104static void check_max_size(struct inode *inode, loff_t endoff)
2105{
2106 struct ceph_inode_info *ci = ceph_inode(inode);
2107 int check = 0;
2108
2109 /* do we need to explicitly request a larger max_size? */
be655596 2110 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2111 if ((endoff >= ci->i_max_size ||
2112 endoff > (inode->i_size << 1)) &&
2113 endoff > ci->i_wanted_max_size) {
2114 dout("write %p at large endoff %llu, req max_size\n",
2115 inode, endoff);
2116 ci->i_wanted_max_size = endoff;
2117 check = 1;
2118 }
be655596 2119 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2120 if (check)
2121 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2122}
2123
2124/*
2125 * Wait for caps, and take cap references. If we can't get a WR cap
2126 * due to a small max_size, make sure we check_max_size (and possibly
2127 * ask the mds) so we don't get hung up indefinitely.
2128 */
2129int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
2130 loff_t endoff)
2131{
2132 int check_max, ret, err;
2133
2134retry:
2135 if (endoff > 0)
2136 check_max_size(&ci->vfs_inode, endoff);
2137 check_max = 0;
2138 err = 0;
2139 ret = wait_event_interruptible(ci->i_cap_wq,
2140 try_get_cap_refs(ci, need, want,
2141 got, endoff,
2142 &check_max, &err));
2143 if (err)
2144 ret = err;
2145 if (check_max)
2146 goto retry;
2147 return ret;
2148}
2149
2150/*
2151 * Take cap refs. Caller must already know we hold at least one ref
2152 * on the caps in question or we don't know this is safe.
2153 */
2154void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2155{
be655596 2156 spin_lock(&ci->i_ceph_lock);
a8599bd8 2157 __take_cap_refs(ci, caps);
be655596 2158 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2159}
2160
2161/*
2162 * Release cap refs.
2163 *
2164 * If we released the last ref on any given cap, call ceph_check_caps
2165 * to release (or schedule a release).
2166 *
2167 * If we are releasing a WR cap (from a sync write), finalize any affected
2168 * cap_snap, and wake up any waiters.
2169 */
2170void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2171{
2172 struct inode *inode = &ci->vfs_inode;
2173 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2174 struct ceph_cap_snap *capsnap;
2175
be655596 2176 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2177 if (had & CEPH_CAP_PIN)
2178 --ci->i_pin_ref;
2179 if (had & CEPH_CAP_FILE_RD)
2180 if (--ci->i_rd_ref == 0)
2181 last++;
2182 if (had & CEPH_CAP_FILE_CACHE)
2183 if (--ci->i_rdcache_ref == 0)
2184 last++;
2185 if (had & CEPH_CAP_FILE_BUFFER) {
d3d0720d 2186 if (--ci->i_wb_ref == 0) {
a8599bd8
SW
2187 last++;
2188 put++;
2189 }
d3d0720d
HC
2190 dout("put_cap_refs %p wb %d -> %d (?)\n",
2191 inode, ci->i_wb_ref+1, ci->i_wb_ref);
a8599bd8
SW
2192 }
2193 if (had & CEPH_CAP_FILE_WR)
2194 if (--ci->i_wr_ref == 0) {
2195 last++;
2196 if (!list_empty(&ci->i_cap_snaps)) {
2197 capsnap = list_first_entry(&ci->i_cap_snaps,
2198 struct ceph_cap_snap,
2199 ci_item);
2200 if (capsnap->writing) {
2201 capsnap->writing = 0;
2202 flushsnaps =
2203 __ceph_finish_cap_snap(ci,
2204 capsnap);
2205 wake = 1;
2206 }
2207 }
2208 }
be655596 2209 spin_unlock(&ci->i_ceph_lock);
a8599bd8 2210
819ccbfa
SW
2211 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2212 last ? " last" : "", put ? " put" : "");
a8599bd8
SW
2213
2214 if (last && !flushsnaps)
2215 ceph_check_caps(ci, 0, NULL);
2216 else if (flushsnaps)
2217 ceph_flush_snaps(ci);
2218 if (wake)
03066f23 2219 wake_up_all(&ci->i_cap_wq);
a8599bd8
SW
2220 if (put)
2221 iput(inode);
2222}
2223
2224/*
2225 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2226 * context. Adjust per-snap dirty page accounting as appropriate.
2227 * Once all dirty data for a cap_snap is flushed, flush snapped file
2228 * metadata back to the MDS. If we dropped the last ref, call
2229 * ceph_check_caps.
2230 */
2231void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2232 struct ceph_snap_context *snapc)
2233{
2234 struct inode *inode = &ci->vfs_inode;
2235 int last = 0;
819ccbfa
SW
2236 int complete_capsnap = 0;
2237 int drop_capsnap = 0;
a8599bd8
SW
2238 int found = 0;
2239 struct ceph_cap_snap *capsnap = NULL;
2240
be655596 2241 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2242 ci->i_wrbuffer_ref -= nr;
2243 last = !ci->i_wrbuffer_ref;
2244
2245 if (ci->i_head_snapc == snapc) {
2246 ci->i_wrbuffer_ref_head -= nr;
7d8cb26d
SW
2247 if (ci->i_wrbuffer_ref_head == 0 &&
2248 ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) {
2249 BUG_ON(!ci->i_head_snapc);
a8599bd8
SW
2250 ceph_put_snap_context(ci->i_head_snapc);
2251 ci->i_head_snapc = NULL;
2252 }
2253 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2254 inode,
2255 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2256 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2257 last ? " LAST" : "");
2258 } else {
2259 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2260 if (capsnap->context == snapc) {
2261 found = 1;
a8599bd8
SW
2262 break;
2263 }
2264 }
2265 BUG_ON(!found);
819ccbfa
SW
2266 capsnap->dirty_pages -= nr;
2267 if (capsnap->dirty_pages == 0) {
2268 complete_capsnap = 1;
2269 if (capsnap->dirty == 0)
2270 /* cap writeback completed before we created
2271 * the cap_snap; no FLUSHSNAP is needed */
2272 drop_capsnap = 1;
2273 }
a8599bd8 2274 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
819ccbfa 2275 " snap %lld %d/%d -> %d/%d %s%s%s\n",
a8599bd8
SW
2276 inode, capsnap, capsnap->context->seq,
2277 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2278 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2279 last ? " (wrbuffer last)" : "",
819ccbfa
SW
2280 complete_capsnap ? " (complete capsnap)" : "",
2281 drop_capsnap ? " (drop capsnap)" : "");
2282 if (drop_capsnap) {
2283 ceph_put_snap_context(capsnap->context);
2284 list_del(&capsnap->ci_item);
2285 list_del(&capsnap->flushing_item);
2286 ceph_put_cap_snap(capsnap);
2287 }
a8599bd8
SW
2288 }
2289
be655596 2290 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2291
2292 if (last) {
2293 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2294 iput(inode);
819ccbfa 2295 } else if (complete_capsnap) {
a8599bd8 2296 ceph_flush_snaps(ci);
03066f23 2297 wake_up_all(&ci->i_cap_wq);
a8599bd8 2298 }
819ccbfa
SW
2299 if (drop_capsnap)
2300 iput(inode);
a8599bd8
SW
2301}
2302
2303/*
2304 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2305 * actually be a revocation if it specifies a smaller cap set.)
2306 *
be655596 2307 * caller holds s_mutex and i_ceph_lock, we drop both.
15637c8b 2308 *
a8599bd8
SW
2309 * The check_caps value set below selects the follow-up action:
2310 *   0 - none
2311 *   1 - check_caps on the auth cap only (writeback)
2312 *   2 - check_caps on all caps (ack revoke)
2313 */
15637c8b
SW
2314static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2315 struct ceph_mds_session *session,
2316 struct ceph_cap *cap,
2317 struct ceph_buffer *xattr_buf)
be655596 2318 __releases(ci->i_ceph_lock)
a8599bd8
SW
2319{
2320 struct ceph_inode_info *ci = ceph_inode(inode);
2321 int mds = session->s_mds;
2f56f56a 2322 int seq = le32_to_cpu(grant->seq);
a8599bd8
SW
2323 int newcaps = le32_to_cpu(grant->caps);
2324 int issued, implemented, used, wanted, dirty;
2325 u64 size = le64_to_cpu(grant->size);
2326 u64 max_size = le64_to_cpu(grant->max_size);
2327 struct timespec mtime, atime, ctime;
15637c8b 2328 int check_caps = 0;
a8599bd8
SW
2329 int wake = 0;
2330 int writeback = 0;
2331 int revoked_rdcache = 0;
3c6f6b79 2332 int queue_invalidate = 0;
a8599bd8 2333
2f56f56a
SW
2334 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2335 inode, cap, mds, seq, ceph_cap_string(newcaps));
a8599bd8
SW
2336 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2337 inode->i_size);
2338
2339 /*
2340 * If CACHE is being revoked, and we have no dirty buffers,
2341 * try to invalidate (once). (If there are dirty buffers, we
2342 * will invalidate _after_ writeback.)
2343 */
3b454c49
SW
2344 if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2345 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
bcd2cbd1 2346 !ci->i_wrbuffer_ref) {
5ecad6fd
SW
2347 if (try_nonblocking_invalidate(inode) == 0) {
2348 revoked_rdcache = 1;
2349 } else {
a8599bd8
SW
2350 /* there were locked pages.. invalidate later
2351 in a separate thread. */
2352 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
3c6f6b79 2353 queue_invalidate = 1;
a8599bd8
SW
2354 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2355 }
a8599bd8 2356 }
a8599bd8
SW
2357 }
2358
2359 /* side effects now are allowed */
2360
2361 issued = __ceph_caps_issued(ci, &implemented);
2362 issued |= implemented | __ceph_caps_dirty(ci);
2363
685f9a5d 2364 cap->cap_gen = session->s_cap_gen;
a8599bd8
SW
2365
2366 __check_cap_issue(ci, cap, newcaps);
2367
2368 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
2369 inode->i_mode = le32_to_cpu(grant->mode);
2370 inode->i_uid = le32_to_cpu(grant->uid);
2371 inode->i_gid = le32_to_cpu(grant->gid);
2372 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2373 inode->i_uid, inode->i_gid);
2374 }
2375
2376 if ((issued & CEPH_CAP_LINK_EXCL) == 0)
bfe86848 2377 set_nlink(inode, le32_to_cpu(grant->nlink));
a8599bd8
SW
2378
2379 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2380 int len = le32_to_cpu(grant->xattr_len);
2381 u64 version = le64_to_cpu(grant->xattr_version);
2382
2383 if (version > ci->i_xattrs.version) {
2384 dout(" got new xattrs v%llu on %p len %d\n",
2385 version, inode, len);
2386 if (ci->i_xattrs.blob)
2387 ceph_buffer_put(ci->i_xattrs.blob);
2388 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2389 ci->i_xattrs.version = version;
2390 }
2391 }
2392
2393 /* size/ctime/mtime/atime? */
2394 ceph_fill_file_size(inode, issued,
2395 le32_to_cpu(grant->truncate_seq),
2396 le64_to_cpu(grant->truncate_size), size);
2397 ceph_decode_timespec(&mtime, &grant->mtime);
2398 ceph_decode_timespec(&atime, &grant->atime);
2399 ceph_decode_timespec(&ctime, &grant->ctime);
2400 ceph_fill_file_time(inode, issued,
2401 le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
2402 &atime);
2403
2404 /* max size increase? */
5e62ad30 2405 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
a8599bd8
SW
2406 dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
2407 ci->i_max_size = max_size;
2408 if (max_size >= ci->i_wanted_max_size) {
2409 ci->i_wanted_max_size = 0; /* reset */
2410 ci->i_requested_max_size = 0;
2411 }
2412 wake = 1;
2413 }
2414
2415 /* check cap bits */
2416 wanted = __ceph_caps_wanted(ci);
2417 used = __ceph_caps_used(ci);
2418 dirty = __ceph_caps_dirty(ci);
2419 dout(" my wanted = %s, used = %s, dirty %s\n",
2420 ceph_cap_string(wanted),
2421 ceph_cap_string(used),
2422 ceph_cap_string(dirty));
2423 if (wanted != le32_to_cpu(grant->wanted)) {
2424 dout("mds wanted %s -> %s\n",
2425 ceph_cap_string(le32_to_cpu(grant->wanted)),
2426 ceph_cap_string(wanted));
2427 grant->wanted = cpu_to_le32(wanted);
2428 }
2429
2430 cap->seq = seq;
2431
2432 /* file layout may have changed */
2433 ci->i_layout = grant->layout;
2434
2435 /* revocation, grant, or no-op? */
2436 if (cap->issued & ~newcaps) {
3b454c49
SW
2437 int revoking = cap->issued & ~newcaps;
2438
2439 dout("revocation: %s -> %s (revoking %s)\n",
2440 ceph_cap_string(cap->issued),
2441 ceph_cap_string(newcaps),
2442 ceph_cap_string(revoking));
0eb6cd49 2443 if (revoking & used & CEPH_CAP_FILE_BUFFER)
3b454c49
SW
2444 writeback = 1; /* initiate writeback; will delay ack */
2445 else if (revoking == CEPH_CAP_FILE_CACHE &&
2446 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2447 queue_invalidate)
2448 ; /* do nothing yet, invalidation will be queued */
2449 else if (cap == ci->i_auth_cap)
2450 check_caps = 1; /* check auth cap only */
2451 else
2452 check_caps = 2; /* check all caps */
a8599bd8 2453 cap->issued = newcaps;
978097c9 2454 cap->implemented |= newcaps;
a8599bd8
SW
2455 } else if (cap->issued == newcaps) {
2456 dout("caps unchanged: %s -> %s\n",
2457 ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2458 } else {
2459 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2460 ceph_cap_string(newcaps));
2461 cap->issued = newcaps;
2462 cap->implemented |= newcaps; /* add bits only, to
2463 * avoid stepping on a
2464 * pending revocation */
2465 wake = 1;
2466 }
978097c9 2467 BUG_ON(cap->issued & ~cap->implemented);
a8599bd8 2468
be655596 2469 spin_unlock(&ci->i_ceph_lock);
3c6f6b79 2470 if (writeback)
a8599bd8
SW
2471 /*
2472 * queue inode for writeback: we can't actually call
2473 * filemap_write_and_wait, etc. from message handler
2474 * context.
2475 */
3c6f6b79
SW
2476 ceph_queue_writeback(inode);
2477 if (queue_invalidate)
2478 ceph_queue_invalidate(inode);
a8599bd8 2479 if (wake)
03066f23 2480 wake_up_all(&ci->i_cap_wq);
15637c8b
SW
2481
2482 if (check_caps == 1)
2483 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2484 session);
2485 else if (check_caps == 2)
2486 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2487 else
2488 mutex_unlock(&session->s_mutex);
a8599bd8
SW
2489}
2490
2491/*
2492 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
2493 * MDS has been safely committed.
2494 */
6df058c0 2495static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
2496 struct ceph_mds_caps *m,
2497 struct ceph_mds_session *session,
2498 struct ceph_cap *cap)
be655596 2499 __releases(ci->i_ceph_lock)
a8599bd8
SW
2500{
2501 struct ceph_inode_info *ci = ceph_inode(inode);
3d14c5d2 2502 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
a8599bd8
SW
2503 unsigned seq = le32_to_cpu(m->seq);
2504 int dirty = le32_to_cpu(m->dirty);
2505 int cleaned = 0;
afcdaea3 2506 int drop = 0;
a8599bd8
SW
2507 int i;
2508
2509 for (i = 0; i < CEPH_CAP_BITS; i++)
2510 if ((dirty & (1 << i)) &&
2511 flush_tid == ci->i_cap_flush_tid[i])
2512 cleaned |= 1 << i;
2513
2514 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
2515 " flushing %s -> %s\n",
2516 inode, session->s_mds, seq, ceph_cap_string(dirty),
2517 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
2518 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
2519
2520 if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
2521 goto out;
2522
a8599bd8 2523 ci->i_flushing_caps &= ~cleaned;
a8599bd8
SW
2524
2525 spin_lock(&mdsc->cap_dirty_lock);
2526 if (ci->i_flushing_caps == 0) {
2527 list_del_init(&ci->i_flushing_item);
2528 if (!list_empty(&session->s_cap_flushing))
2529 dout(" mds%d still flushing cap on %p\n",
2530 session->s_mds,
2531 &list_entry(session->s_cap_flushing.next,
2532 struct ceph_inode_info,
2533 i_flushing_item)->vfs_inode);
2534 mdsc->num_cap_flushing--;
03066f23 2535 wake_up_all(&mdsc->cap_flushing_wq);
a8599bd8 2536 dout(" inode %p now !flushing\n", inode);
afcdaea3
SW
2537
2538 if (ci->i_dirty_caps == 0) {
2539 dout(" inode %p now clean\n", inode);
2540 BUG_ON(!list_empty(&ci->i_dirty_item));
2541 drop = 1;
7d8cb26d
SW
2542 if (ci->i_wrbuffer_ref_head == 0) {
2543 BUG_ON(!ci->i_head_snapc);
2544 ceph_put_snap_context(ci->i_head_snapc);
2545 ci->i_head_snapc = NULL;
2546 }
76e3b390
SW
2547 } else {
2548 BUG_ON(list_empty(&ci->i_dirty_item));
afcdaea3 2549 }
a8599bd8
SW
2550 }
2551 spin_unlock(&mdsc->cap_dirty_lock);
03066f23 2552 wake_up_all(&ci->i_cap_wq);
a8599bd8
SW
2553
2554out:
be655596 2555 spin_unlock(&ci->i_ceph_lock);
afcdaea3 2556 if (drop)
a8599bd8
SW
2557 iput(inode);
2558}
2559
2560/*
2561 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
2562 * throw away our cap_snap.
2563 *
2564 * Caller holds s_mutex.
2565 */
6df058c0 2566static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
a8599bd8
SW
2567 struct ceph_mds_caps *m,
2568 struct ceph_mds_session *session)
2569{
2570 struct ceph_inode_info *ci = ceph_inode(inode);
2571 u64 follows = le64_to_cpu(m->snap_follows);
a8599bd8
SW
2572 struct ceph_cap_snap *capsnap;
2573 int drop = 0;
2574
2575 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
2576 inode, ci, session->s_mds, follows);
2577
be655596 2578 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2579 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2580 if (capsnap->follows == follows) {
2581 if (capsnap->flush_tid != flush_tid) {
2582 dout(" cap_snap %p follows %lld tid %lld !="
2583 " %lld\n", capsnap, follows,
2584 flush_tid, capsnap->flush_tid);
2585 break;
2586 }
2587 WARN_ON(capsnap->dirty_pages || capsnap->writing);
819ccbfa
SW
2588 dout(" removing %p cap_snap %p follows %lld\n",
2589 inode, capsnap, follows);
a8599bd8
SW
2590 ceph_put_snap_context(capsnap->context);
2591 list_del(&capsnap->ci_item);
2592 list_del(&capsnap->flushing_item);
2593 ceph_put_cap_snap(capsnap);
2594 drop = 1;
2595 break;
2596 } else {
2597 dout(" skipping cap_snap %p follows %lld\n",
2598 capsnap, capsnap->follows);
2599 }
2600 }
be655596 2601 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2602 if (drop)
2603 iput(inode);
2604}
2605
2606/*
2607 * Handle TRUNC from MDS, indicating file truncation.
2608 *
2609 * caller holds s_mutex.
2610 */
2611static void handle_cap_trunc(struct inode *inode,
2612 struct ceph_mds_caps *trunc,
2613 struct ceph_mds_session *session)
be655596 2614 __releases(ci->i_ceph_lock)
a8599bd8
SW
2615{
2616 struct ceph_inode_info *ci = ceph_inode(inode);
2617 int mds = session->s_mds;
2618 int seq = le32_to_cpu(trunc->seq);
2619 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
2620 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
2621 u64 size = le64_to_cpu(trunc->size);
2622 int implemented = 0;
2623 int dirty = __ceph_caps_dirty(ci);
2624 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
2625 int queue_trunc = 0;
2626
2627 issued |= implemented | dirty;
2628
2629 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
2630 inode, mds, seq, truncate_size, truncate_seq);
2631 queue_trunc = ceph_fill_file_size(inode, issued,
2632 truncate_seq, truncate_size, size);
be655596 2633 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2634
2635 if (queue_trunc)
3c6f6b79 2636 ceph_queue_vmtruncate(inode);
a8599bd8
SW
2637}
2638
2639/*
2640 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
2641 * different one. If we are the most recent migration we've seen (as
2642 * indicated by mseq), make note of the migrating cap bits for the
2643 * duration (until we see the corresponding IMPORT).
2644 *
2645 * caller holds s_mutex
2646 */
2647static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
154f42c2
SW
2648 struct ceph_mds_session *session,
2649 int *open_target_sessions)
a8599bd8 2650{
db354052 2651 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
a8599bd8
SW
2652 struct ceph_inode_info *ci = ceph_inode(inode);
2653 int mds = session->s_mds;
2654 unsigned mseq = le32_to_cpu(ex->migrate_seq);
2655 struct ceph_cap *cap = NULL, *t;
2656 struct rb_node *p;
2657 int remember = 1;
2658
2659 dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
2660 inode, ci, mds, mseq);
2661
be655596 2662 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2663
2664 /* make sure we haven't seen a higher mseq */
2665 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
2666 t = rb_entry(p, struct ceph_cap, ci_node);
2667 if (ceph_seq_cmp(t->mseq, mseq) > 0) {
2668 dout(" higher mseq on cap from mds%d\n",
2669 t->session->s_mds);
2670 remember = 0;
2671 }
2672 if (t->session->s_mds == mds)
2673 cap = t;
2674 }
2675
2676 if (cap) {
2677 if (remember) {
2678 /* make note */
2679 ci->i_cap_exporting_mds = mds;
2680 ci->i_cap_exporting_mseq = mseq;
2681 ci->i_cap_exporting_issued = cap->issued;
154f42c2
SW
2682
2683 /*
2684 * make sure we have open sessions with all possible
2685 * export targets, so that we get the matching IMPORT
2686 */
2687 *open_target_sessions = 1;
db354052
SW
2688
2689 /*
2690 * we can't flush dirty caps that we've seen the
2691 * EXPORT but no IMPORT for
2692 */
2693 spin_lock(&mdsc->cap_dirty_lock);
2694 if (!list_empty(&ci->i_dirty_item)) {
2695 dout(" moving %p to cap_dirty_migrating\n",
2696 inode);
2697 list_move(&ci->i_dirty_item,
2698 &mdsc->cap_dirty_migrating);
2699 }
2700 spin_unlock(&mdsc->cap_dirty_lock);
a8599bd8 2701 }
7c1332b8 2702 __ceph_remove_cap(cap);
a8599bd8 2703 }
4ea0043a 2704 /* else, we already released it */
a8599bd8 2705
be655596 2706 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2707}
2708
2709/*
2710 * Handle cap IMPORT. If there are temp bits from an older EXPORT,
2711 * clean them up.
2712 *
2713 * caller holds s_mutex.
2714 */
2715static void handle_cap_import(struct ceph_mds_client *mdsc,
2716 struct inode *inode, struct ceph_mds_caps *im,
2717 struct ceph_mds_session *session,
2718 void *snaptrace, int snaptrace_len)
2719{
2720 struct ceph_inode_info *ci = ceph_inode(inode);
2721 int mds = session->s_mds;
2722 unsigned issued = le32_to_cpu(im->caps);
2723 unsigned wanted = le32_to_cpu(im->wanted);
2724 unsigned seq = le32_to_cpu(im->seq);
2725 unsigned mseq = le32_to_cpu(im->migrate_seq);
2726 u64 realmino = le64_to_cpu(im->realm);
2727 u64 cap_id = le64_to_cpu(im->cap_id);
2728
2729 if (ci->i_cap_exporting_mds >= 0 &&
2730 ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
2731 dout("handle_cap_import inode %p ci %p mds%d mseq %d"
2732 " - cleared exporting from mds%d\n",
2733 inode, ci, mds, mseq,
2734 ci->i_cap_exporting_mds);
2735 ci->i_cap_exporting_issued = 0;
2736 ci->i_cap_exporting_mseq = 0;
2737 ci->i_cap_exporting_mds = -1;
db354052
SW
2738
2739 spin_lock(&mdsc->cap_dirty_lock);
2740 if (!list_empty(&ci->i_dirty_item)) {
2741 dout(" moving %p back to cap_dirty\n", inode);
2742 list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
2743 }
2744 spin_unlock(&mdsc->cap_dirty_lock);
a8599bd8
SW
2745 } else {
2746 dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
2747 inode, ci, mds, mseq);
2748 }
2749
2750 down_write(&mdsc->snap_rwsem);
2751 ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
2752 false);
2753 downgrade_write(&mdsc->snap_rwsem);
2754 ceph_add_cap(inode, session, cap_id, -1,
2755 issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
2756 NULL /* no caps context */);
088b3f5e 2757 kick_flushing_inode_caps(mdsc, session, inode);
a8599bd8 2758 up_read(&mdsc->snap_rwsem);
feb4cc9b
SW
2759
2760 /* make sure we re-request max_size, if necessary */
be655596 2761 spin_lock(&ci->i_ceph_lock);
0e5e1774 2762 ci->i_wanted_max_size = 0; /* reset */
feb4cc9b 2763 ci->i_requested_max_size = 0;
be655596 2764 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2765}
2766
2767/*
2768 * Handle a caps message from the MDS.
2769 *
2770 * Identify the appropriate session, inode, and call the right handler
2771 * based on the cap op.
2772 */
2773void ceph_handle_caps(struct ceph_mds_session *session,
2774 struct ceph_msg *msg)
2775{
2776 struct ceph_mds_client *mdsc = session->s_mdsc;
3d14c5d2 2777 struct super_block *sb = mdsc->fsc->sb;
a8599bd8 2778 struct inode *inode;
be655596 2779 struct ceph_inode_info *ci;
a8599bd8
SW
2780 struct ceph_cap *cap;
2781 struct ceph_mds_caps *h;
2600d2dd 2782 int mds = session->s_mds;
a8599bd8 2783 int op;
3d7ded4d 2784 u32 seq, mseq;
a8599bd8
SW
2785 struct ceph_vino vino;
2786 u64 cap_id;
2787 u64 size, max_size;
6df058c0 2788 u64 tid;
70edb55b 2789 void *snaptrace;
ce1fbc8d
SW
2790 size_t snaptrace_len;
2791 void *flock;
2792 u32 flock_len;
154f42c2 2793 int open_target_sessions = 0;
a8599bd8
SW
2794
2795 dout("handle_caps from mds%d\n", mds);
2796
2797 /* decode */
6df058c0 2798 tid = le64_to_cpu(msg->hdr.tid);
a8599bd8
SW
2799 if (msg->front.iov_len < sizeof(*h))
2800 goto bad;
2801 h = msg->front.iov_base;
2802 op = le32_to_cpu(h->op);
2803 vino.ino = le64_to_cpu(h->ino);
2804 vino.snap = CEPH_NOSNAP;
2805 cap_id = le64_to_cpu(h->cap_id);
2806 seq = le32_to_cpu(h->seq);
3d7ded4d 2807 mseq = le32_to_cpu(h->migrate_seq);
a8599bd8
SW
2808 size = le64_to_cpu(h->size);
2809 max_size = le64_to_cpu(h->max_size);
2810
ce1fbc8d
SW
2811 snaptrace = h + 1;
2812 snaptrace_len = le32_to_cpu(h->snap_trace_len);
2813
2814 if (le16_to_cpu(msg->hdr.version) >= 2) {
2815 void *p, *end;
2816
2817 p = snaptrace + snaptrace_len;
2818 end = msg->front.iov_base + msg->front.iov_len;
2819 ceph_decode_32_safe(&p, end, flock_len, bad);
2820 flock = p;
2821 } else {
2822 flock = NULL;
2823 flock_len = 0;
2824 }
2825
a8599bd8
SW
2826 mutex_lock(&session->s_mutex);
2827 session->s_seq++;
2828 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
2829 (unsigned)seq);
2830
2831 /* lookup ino */
2832 inode = ceph_find_inode(sb, vino);
be655596 2833 ci = ceph_inode(inode);
a8599bd8
SW
2834 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
2835 vino.snap, inode);
2836 if (!inode) {
2837 dout(" i don't have ino %llx\n", vino.ino);
3d7ded4d
SW
2838
2839 if (op == CEPH_CAP_OP_IMPORT)
2840 __queue_cap_release(session, vino.ino, cap_id,
2841 mseq, seq);
21b559de 2842 goto flush_cap_releases;
a8599bd8
SW
2843 }
2844
2845 /* these will work even if we don't have a cap yet */
2846 switch (op) {
2847 case CEPH_CAP_OP_FLUSHSNAP_ACK:
6df058c0 2848 handle_cap_flushsnap_ack(inode, tid, h, session);
a8599bd8
SW
2849 goto done;
2850
2851 case CEPH_CAP_OP_EXPORT:
154f42c2 2852 handle_cap_export(inode, h, session, &open_target_sessions);
a8599bd8
SW
2853 goto done;
2854
2855 case CEPH_CAP_OP_IMPORT:
2856 handle_cap_import(mdsc, inode, h, session,
ce1fbc8d 2857 snaptrace, snaptrace_len);
a8599bd8
SW
2858 }
2859
2860 /* the rest require a cap */
be655596 2861 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2862 cap = __get_cap_for_mds(ceph_inode(inode), mds);
2863 if (!cap) {
9dbd412f 2864 dout(" no cap on %p ino %llx.%llx from mds%d\n",
a8599bd8 2865 inode, ceph_ino(inode), ceph_snap(inode), mds);
be655596 2866 spin_unlock(&ci->i_ceph_lock);
21b559de 2867 goto flush_cap_releases;
a8599bd8
SW
2868 }
2869
be655596 2870 /* note that each of these drops i_ceph_lock for us */
a8599bd8
SW
2871 switch (op) {
2872 case CEPH_CAP_OP_REVOKE:
2873 case CEPH_CAP_OP_GRANT:
0e5e1774 2874 case CEPH_CAP_OP_IMPORT:
15637c8b
SW
2875 handle_cap_grant(inode, h, session, cap, msg->middle);
2876 goto done_unlocked;
a8599bd8
SW
2877
2878 case CEPH_CAP_OP_FLUSH_ACK:
6df058c0 2879 handle_cap_flush_ack(inode, tid, h, session, cap);
a8599bd8
SW
2880 break;
2881
2882 case CEPH_CAP_OP_TRUNC:
2883 handle_cap_trunc(inode, h, session);
2884 break;
2885
2886 default:
be655596 2887 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2888 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
2889 ceph_cap_op_name(op));
2890 }
2891
21b559de
GF
2892 goto done;
2893
2894flush_cap_releases:
2895 /*
2896 * send any full release message to try to move things
2897 * along for the mds (who clearly thinks we still have this
2898 * cap).
2899 */
2900 ceph_add_cap_releases(mdsc, session);
2901 ceph_send_cap_releases(mdsc, session);
2902
a8599bd8 2903done:
15637c8b
SW
2904 mutex_unlock(&session->s_mutex);
2905done_unlocked:
a8599bd8
SW
2906 if (inode)
2907 iput(inode);
154f42c2
SW
2908 if (open_target_sessions)
2909 ceph_mdsc_open_export_target_sessions(mdsc, session);
a8599bd8
SW
2910 return;
2911
2912bad:
2913 pr_err("ceph_handle_caps: corrupt message\n");
9ec7cab1 2914 ceph_msg_dump(msg);
a8599bd8
SW
2915 return;
2916}
2917
2918/*
2919 * Delayed work handler to process end of delayed cap release LRU list.
2920 */
afcdaea3 2921void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
a8599bd8
SW
2922{
2923 struct ceph_inode_info *ci;
2924 int flags = CHECK_CAPS_NODELAY;
2925
a8599bd8
SW
2926 dout("check_delayed_caps\n");
2927 while (1) {
2928 spin_lock(&mdsc->cap_delay_lock);
2929 if (list_empty(&mdsc->cap_delay_list))
2930 break;
2931 ci = list_first_entry(&mdsc->cap_delay_list,
2932 struct ceph_inode_info,
2933 i_cap_delay_list);
2934 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
2935 time_before(jiffies, ci->i_hold_caps_max))
2936 break;
2937 list_del_init(&ci->i_cap_delay_list);
2938 spin_unlock(&mdsc->cap_delay_lock);
2939 dout("check_delayed_caps on %p\n", &ci->vfs_inode);
2940 ceph_check_caps(ci, flags, NULL);
2941 }
2942 spin_unlock(&mdsc->cap_delay_lock);
2943}
2944
afcdaea3
SW
2945/*
2946 * Flush all dirty caps to the mds
2947 */
2948void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2949{
db354052
SW
2950 struct ceph_inode_info *ci;
2951 struct inode *inode;
afcdaea3
SW
2952
2953 dout("flush_dirty_caps\n");
2954 spin_lock(&mdsc->cap_dirty_lock);
db354052
SW
2955 while (!list_empty(&mdsc->cap_dirty)) {
2956 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2957 i_dirty_item);
70b666c3
SW
2958 inode = &ci->vfs_inode;
2959 ihold(inode);
db354052 2960 dout("flush_dirty_caps %p\n", inode);
afcdaea3 2961 spin_unlock(&mdsc->cap_dirty_lock);
70b666c3
SW
2962 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
2963 iput(inode);
afcdaea3
SW
2964 spin_lock(&mdsc->cap_dirty_lock);
2965 }
2966 spin_unlock(&mdsc->cap_dirty_lock);
db354052 2967 dout("flush_dirty_caps done\n");
afcdaea3
SW
2968}
2969
a8599bd8
SW
2970/*
2971 * Drop open file reference. If we were the last open file,
2972 * we may need to release capabilities to the MDS (or schedule
2973 * their delayed release).
2974 */
2975void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
2976{
2977 struct inode *inode = &ci->vfs_inode;
2978 int last = 0;
2979
be655596 2980 spin_lock(&ci->i_ceph_lock);
a8599bd8
SW
2981 dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
2982 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
2983 BUG_ON(ci->i_nr_by_mode[fmode] == 0);
2984 if (--ci->i_nr_by_mode[fmode] == 0)
2985 last++;
be655596 2986 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
2987
2988 if (last && ci->i_vino.snap == CEPH_NOSNAP)
2989 ceph_check_caps(ci, 0, NULL);
2990}
2991
2992/*
2993 * Helpers for embedding cap and dentry lease releases into mds
2994 * requests.
2995 *
2996 * @force is used by dentry_release (below) to force inclusion of a
2997 * record for the directory inode, even when there aren't any caps to
2998 * drop.
2999 */
3000int ceph_encode_inode_release(void **p, struct inode *inode,
3001 int mds, int drop, int unless, int force)
3002{
3003 struct ceph_inode_info *ci = ceph_inode(inode);
3004 struct ceph_cap *cap;
3005 struct ceph_mds_request_release *rel = *p;
ec97f88b 3006 int used, dirty;
a8599bd8 3007 int ret = 0;
a8599bd8 3008
be655596 3009 spin_lock(&ci->i_ceph_lock);
916623da 3010 used = __ceph_caps_used(ci);
ec97f88b 3011 dirty = __ceph_caps_dirty(ci);
916623da 3012
ec97f88b
SW
3013 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3014 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
916623da
SW
3015 ceph_cap_string(unless));
3016
ec97f88b
SW
3017 /* only drop unused, clean caps */
3018 drop &= ~(used | dirty);
916623da 3019
a8599bd8
SW
3020 cap = __get_cap_for_mds(ci, mds);
3021 if (cap && __cap_is_valid(cap)) {
3022 if (force ||
3023 ((cap->issued & drop) &&
3024 (cap->issued & unless) == 0)) {
3025 if ((cap->issued & drop) &&
3026 (cap->issued & unless) == 0) {
3027 dout("encode_inode_release %p cap %p %s -> "
3028 "%s\n", inode, cap,
3029 ceph_cap_string(cap->issued),
3030 ceph_cap_string(cap->issued & ~drop));
3031 cap->issued &= ~drop;
3032 cap->implemented &= ~drop;
3033 if (ci->i_ceph_flags & CEPH_I_NODELAY) {
3034 int wanted = __ceph_caps_wanted(ci);
3035 dout(" wanted %s -> %s (act %s)\n",
3036 ceph_cap_string(cap->mds_wanted),
3037 ceph_cap_string(cap->mds_wanted &
3038 ~wanted),
3039 ceph_cap_string(wanted));
3040 cap->mds_wanted &= wanted;
3041 }
3042 } else {
3043 dout("encode_inode_release %p cap %p %s"
3044 " (force)\n", inode, cap,
3045 ceph_cap_string(cap->issued));
3046 }
3047
3048 rel->ino = cpu_to_le64(ceph_ino(inode));
3049 rel->cap_id = cpu_to_le64(cap->cap_id);
3050 rel->seq = cpu_to_le32(cap->seq);
3051 rel->issue_seq = cpu_to_le32(cap->issue_seq);
3052 rel->mseq = cpu_to_le32(cap->mseq);
3053 rel->caps = cpu_to_le32(cap->issued);
3054 rel->wanted = cpu_to_le32(cap->mds_wanted);
3055 rel->dname_len = 0;
3056 rel->dname_seq = 0;
3057 *p += sizeof(*rel);
3058 ret = 1;
3059 } else {
3060 dout("encode_inode_release %p cap %p %s\n",
3061 inode, cap, ceph_cap_string(cap->issued));
3062 }
3063 }
be655596 3064 spin_unlock(&ci->i_ceph_lock);
a8599bd8
SW
3065 return ret;
3066}
3067
3068int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3069 int mds, int drop, int unless)
3070{
3071 struct inode *dir = dentry->d_parent->d_inode;
3072 struct ceph_mds_request_release *rel = *p;
3073 struct ceph_dentry_info *di = ceph_dentry(dentry);
3074 int force = 0;
3075 int ret;
3076
3077 /*
3078 * force a record for the directory caps if we have a dentry lease.
be655596 3079 * this is racy (can't take i_ceph_lock and d_lock together), but it
a8599bd8
SW
3080 * doesn't have to be perfect; the mds will revoke anything we don't
3081 * release.
3082 */
3083 spin_lock(&dentry->d_lock);
3084 if (di->lease_session && di->lease_session->s_mds == mds)
3085 force = 1;
3086 spin_unlock(&dentry->d_lock);
3087
3088 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3089
3090 spin_lock(&dentry->d_lock);
3091 if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3092 dout("encode_dentry_release %p mds%d seq %d\n",
3093 dentry, mds, (int)di->lease_seq);
3094 rel->dname_len = cpu_to_le32(dentry->d_name.len);
3095 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3096 *p += dentry->d_name.len;
3097 rel->dname_seq = cpu_to_le32(di->lease_seq);
1dadcce3 3098 __ceph_mdsc_drop_dentry_lease(dentry);
a8599bd8
SW
3099 }
3100 spin_unlock(&dentry->d_lock);
3101 return ret;
3102}