// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * issued by at least one MDS.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
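
/*
 * Illustrative sketch (editorial, not used by the code below): a cap
 * word packs per-section permission bits at fixed shifts, which is
 * exactly what ceph_cap_string() below decodes.  Assuming the usual
 * bit layout from <linux/ceph/ceph_fs.h>, a pinned FILE cap carrying
 * the generic SHARED and RD bits could be composed as:
 *
 *	int caps = CEPH_CAP_PIN |
 *		   ((CEPH_CAP_GSHARED | CEPH_CAP_GRD) << CEPH_CAP_SFILE);
 *
 * The AUTH, LINK and XATTR sections carry only the SHARED/EXCL
 * generic bits (hence the "& 3" masks below), while the FILE section
 * can also carry CACHE, RD, WR, BUFFER and LAZYIO.
 */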

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid);

/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}

const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
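
/*
 * Example (illustrative): for the cap word sketched above,
 * ceph_cap_string() would return "pFsr" -- 'p' for the pin, 'F' for
 * the file section, then 's' (shared) and 'r' (read).  The returned
 * pointer refers to a small rotating buffer, so it is only suitable
 * for immediate debugging output.
 */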

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reserve_caps(struct ceph_mds_client *mdsc,
		       struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap)
			break;
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	/* we didn't manage to reserve as much as we needed */
	if (have + alloc != need)
		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
			ctx, need, have + alloc);

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
}

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}
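
/*
 * Typical reservation lifecycle (an illustrative sketch; locking and
 * error handling elided).  A caller that may need up to N caps
 * reserves them up front, consumes them with ceph_get_cap(), and
 * returns whatever is left over:
 *
 *	struct ceph_cap_reservation ctx = {};
 *	struct ceph_cap *cap;
 *
 *	ceph_reserve_caps(mdsc, &ctx, 2);
 *	cap = ceph_get_cap(mdsc, &ctx);     - draws from the reservation
 *	...
 *	ceph_put_cap(mdsc, cap);            - back to the avail pool, or freed
 *	ceph_unreserve_caps(mdsc, &ctx);    - gives back what's unused
 */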

struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
			      struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;

	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode));
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}

/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
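
/*
 * Editorial note: with the default mount options of this era
 * (caps_wanted_delay_min=5, caps_wanted_delay_max=60 -- an assumption;
 * see the defaults in super.h), an unused cap is held for roughly 5 to
 * 60 seconds before the delayed-release machinery lets go of it.
 */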

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
		ci->i_rdcache_gen++;
	}

	/*
	 * If FILE_SHARED is newly issued, mark dir not complete.  We don't
	 * know what happened to this directory while we didn't have the cap.
	 * If FILE_SHARED is being revoked, also mark dir not complete; it
	 * stops ongoing cached readdir.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
		if (issued & CEPH_CAP_FILE_SHARED)
			ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
		  struct ceph_mds_session *session, u64 cap_id,
		  int fmode, unsigned issued, unsigned wanted,
		  unsigned seq, unsigned mseq, u64 realmino, int flags,
		  struct ceph_cap **new_cap)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		cap = *new_cap;
		*new_cap = NULL;

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else {
		/*
		 * The auth MDS of the inode changed.  We received the cap
		 * export message, but still haven't received the cap import
		 * message.  handle_cap_export() updated the new auth MDS'
		 * cap.
		 *
		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
		 * a message that was sent before the cap import message.  So
		 * don't remove caps.
		 */
		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
			WARN_ON(cap != ci->i_auth_cap);
			WARN_ON(cap->cap_id != cap_id);
			seq = cap->seq;
			mseq = cap->mseq;
			issued |= cap->issued;
			flags |= CEPH_CAP_FLAG_AUTH;
		}
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		if (!ci->i_auth_cap ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
			ci->i_auth_cap = cap;
			cap->mds_wanted = wanted;
		}
	} else {
		WARN_ON(ci->i_auth_cap == cap);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	/*
	 * Exclude caps issued by a non-auth MDS that are being revoked
	 * by the auth MDS.  The non-auth MDS should be revoking/exporting
	 * these caps, but the message is delayed.
	 */
	if (ci->i_auth_cap) {
		cap = ci->i_auth_cap;
		have &= ~cap->implemented | cap->issued;
	}
	return have;
}
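
/*
 * Worked example for the masking above (illustrative): if the auth
 * cap has implemented == Fsc but issued == Fs, then Fc is mid-revoke,
 * and "have &= ~implemented | issued" strips Fc from the result even
 * if a non-auth MDS still nominally issues it.
 */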

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (!s->s_cap_iterator) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * end (newest side) of their respective LRUs.  (This is the preferred
 * way for callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
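
/*
 * Illustrative example of the "combo" case above: if mds0 issues Fs
 * and mds1 issues Fr, a mask of Fsr is satisfied only by the union of
 * the two caps; with touch set, both caps are then moved to the
 * newest end of their session LRU lists.
 */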

/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
			       struct ceph_cap *ocap, int mask)
{
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap != ocap &&
		    (cap->implemented & ~cap->issued & mask))
			return 1;
	}
	return 0;
}
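
/*
 * Example (illustrative): with implemented == Fscr and issued == Fs,
 * the Fcr bits are still mid-revocation, so any mask overlapping Fcr
 * reports "revoking" here until the revoke is acked back to the MDS.
 */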

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_caps_revoking_other(ci, NULL, mask);
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;

	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref ||
	    (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
	     ci->vfs_inode.i_data.nrpages))
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int i, bits = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (ci->i_nr_by_mode[i])
			bits |= 1 << i;
	}
	if (bits == 0)
		return 0;
	return ceph_caps_for_mode(bits >> 1);
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (check && !__cap_is_valid(cap))
			continue;
		if (cap == ci->i_auth_cap)
			mds_wanted |= cap->mds_wanted;
		else
			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
	}
	return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}

int ceph_is_any_caps(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_is_any_caps(ci);
	spin_unlock(&ci->i_ceph_lock);

	return ret;
}

static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
	struct ceph_snap_realm *realm = ci->i_snap_realm;

	spin_lock(&realm->inodes_with_caps_lock);
	list_del_init(&ci->i_snap_realm_item);
	ci->i_snap_realm_counter++;
	ci->i_snap_realm = NULL;
	spin_unlock(&realm->inodes_with_caps_lock);
	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
			    realm);
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;

	/*
	 * s_cap_reconnect is protected by s_cap_lock.  no one changes
	 * s_cap_gen while session is in the reconnect state.
	 */
	if (queue_release &&
	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
		cap->queue_release = 1;
		if (removed) {
			list_add_tail(&cap->session_caps,
				      &session->s_cap_releases);
			session->s_num_cap_releases++;
			removed = 0;
		}
	} else {
		cap->queue_release = 0;
	}
	cap->cap_ino = ci->i_vino.ino;

	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	if (removed)
		ceph_put_cap(mdsc, cap);

	/*
	 * When a reconnect is denied we remove session caps forcibly,
	 * so i_wr_ref can be non-zero.  If there are ongoing writes,
	 * keep i_snap_realm.
	 */
	if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
		drop_inode_snap_realm(ci);

	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}

struct cap_msg_args {
	struct ceph_mds_session	*session;
	u64			ino, cid, follows;
	u64			flush_tid, oldest_flush_tid, size, max_size;
	u64			xattr_version;
	struct ceph_buffer	*xattr_buf;
	struct timespec		atime, mtime, ctime;
	int			op, caps, wanted, dirty;
	u32			seq, issue_seq, mseq, time_warp_seq;
	u32			flags;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
	bool			inline_data;
};

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct cap_msg_args *arg)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;
	void *p;
	size_t extra_len;
	struct timespec zerotime = {0};
	struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(arg->op),
	     arg->cid, arg->ino, ceph_cap_string(arg->caps),
	     ceph_cap_string(arg->wanted), ceph_cap_string(arg->dirty),
	     arg->seq, arg->issue_seq, arg->flush_tid, arg->oldest_flush_tid,
	     arg->mseq, arg->follows, arg->size, arg->max_size,
	     arg->xattr_version,
	     arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);

	/* flock buffer size + inline version + inline data size +
	 * osd_epoch_barrier + oldest_flush_tid */
	extra_len = 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4;
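	/*
	 * Term-by-term, extra_len above matches the fields encoded below,
	 * in order: flock len (4) + inline version (8) + inline data len
	 * (4) + osd_epoch_barrier (4) + oldest_flush_tid (8) + caller_uid
	 * (4) + caller_gid (4) + pool namespace len (4) + btime (8) +
	 * change_attr (8) + flags (4).
	 */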
	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
			   GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.version = cpu_to_le16(10);
	msg->hdr.tid = cpu_to_le64(arg->flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(arg->cid);
	fc->op = cpu_to_le32(arg->op);
	fc->seq = cpu_to_le32(arg->seq);
	fc->issue_seq = cpu_to_le32(arg->issue_seq);
	fc->migrate_seq = cpu_to_le32(arg->mseq);
	fc->caps = cpu_to_le32(arg->caps);
	fc->wanted = cpu_to_le32(arg->wanted);
	fc->dirty = cpu_to_le32(arg->dirty);
	fc->ino = cpu_to_le64(arg->ino);
	fc->snap_follows = cpu_to_le64(arg->follows);

	fc->size = cpu_to_le64(arg->size);
	fc->max_size = cpu_to_le64(arg->max_size);
	ceph_encode_timespec(&fc->mtime, &arg->mtime);
	ceph_encode_timespec(&fc->atime, &arg->atime);
	ceph_encode_timespec(&fc->ctime, &arg->ctime);
	fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, arg->gid));
	fc->mode = cpu_to_le32(arg->mode);

	fc->xattr_version = cpu_to_le64(arg->xattr_version);
	if (arg->xattr_buf) {
		msg->middle = ceph_buffer_get(arg->xattr_buf);
		fc->xattr_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
	}

	p = fc + 1;
	/* flock buffer size (version 2) */
	ceph_encode_32(&p, 0);
	/* inline version (version 4) */
	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
	/* inline data size */
	ceph_encode_32(&p, 0);
	/*
	 * osd_epoch_barrier (version 5)
	 * The epoch_barrier is protected by osdc->lock, so READ_ONCE here in
	 * case it was recently changed
	 */
	ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
	/* oldest_flush_tid (version 6) */
	ceph_encode_64(&p, arg->oldest_flush_tid);

	/*
	 * caller_uid/caller_gid (version 7)
	 *
	 * Currently, we don't properly track which caller dirtied the caps
	 * last, and force a flush of them when there is a conflict.  For now,
	 * just set this to 0:0, to emulate how the MDS has worked up to now.
	 */
	ceph_encode_32(&p, 0);
	ceph_encode_32(&p, 0);

	/* pool namespace (version 8) (mds always ignores this) */
	ceph_encode_32(&p, 0);

	/*
	 * btime and change_attr (version 9)
	 *
	 * We just zero these out for now, as the MDS ignores them unless
	 * the requisite feature flags are set (which we don't do yet).
	 */
	ceph_encode_timespec(p, &zerotime);
	p += sizeof(struct ceph_timespec);
	ceph_encode_64(&p, 0);

	/* Advisory flags (version 10) */
	ceph_encode_32(&p, arg->flags);

	ceph_con_send(&arg->session->s_con, msg);
	return 0;
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * the inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		p = rb_next(p);
		__ceph_remove_cap(cap, true);
	}
}

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate the page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, bool sync, int used, int want, int retain,
		      int flushing, u64 flush_tid, u64 oldest_flush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	struct cap_msg_args arg;
	int held, revoking, dropping;
	int wake = 0;
	int delayed = 0;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	arg.session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
	if (want & ~cap->mds_wanted) {
		/* User space may open/close a single file frequently.
		 * This avoids dropping mds_wanted immediately after
		 * requesting new mds_wanted.
		 */
		__cap_set_timeouts(mdsc, ci);
	}

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	arg.ino = ceph_vino(inode).ino;
	arg.cid = cap->cap_id;
	arg.follows = flushing ? ci->i_head_snapc->seq : 0;
	arg.flush_tid = flush_tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = inode->i_size;
	ci->i_reported_size = arg.size;
	arg.max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = arg.max_size;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		arg.xattr_version = ci->i_xattrs.version;
		arg.xattr_buf = ci->i_xattrs.blob;
	} else {
		arg.xattr_buf = NULL;
	}

	arg.mtime = inode->i_mtime;
	arg.atime = inode->i_atime;
	arg.ctime = inode->i_ctime;

	arg.op = op;
	arg.caps = cap->implemented;
	arg.wanted = want;
	arg.dirty = flushing;

	arg.seq = cap->seq;
	arg.issue_seq = cap->issue_seq;
	arg.mseq = cap->mseq;
	arg.time_warp_seq = ci->i_time_warp_seq;

	arg.uid = inode->i_uid;
	arg.gid = inode->i_gid;
	arg.mode = inode->i_mode;

	arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
	if (list_empty(&ci->i_cap_snaps))
		arg.flags = CEPH_CLIENT_CAPS_NO_CAPSNAP;
	else
		arg.flags = CEPH_CLIENT_CAPS_PENDING_CAPSNAP;
	if (sync)
		arg.flags |= CEPH_CLIENT_CAPS_SYNC;

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(&arg);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}

static inline int __send_flush_snap(struct inode *inode,
				    struct ceph_mds_session *session,
				    struct ceph_cap_snap *capsnap,
				    u32 mseq, u64 oldest_flush_tid)
{
	struct cap_msg_args arg;

	arg.session = session;
	arg.ino = ceph_vino(inode).ino;
	arg.cid = 0;
	arg.follows = capsnap->follows;
	arg.flush_tid = capsnap->cap_flush.tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = capsnap->size;
	arg.max_size = 0;
	arg.xattr_version = capsnap->xattr_version;
	arg.xattr_buf = capsnap->xattr_blob;

	arg.atime = capsnap->atime;
	arg.mtime = capsnap->mtime;
	arg.ctime = capsnap->ctime;

	arg.op = CEPH_CAP_OP_FLUSHSNAP;
	arg.caps = capsnap->issued;
	arg.wanted = 0;
	arg.dirty = capsnap->dirty;

	arg.seq = 0;
	arg.issue_seq = 0;
	arg.mseq = mseq;
	arg.time_warp_seq = capsnap->time_warp_seq;

	arg.uid = capsnap->uid;
	arg.gid = capsnap->gid;
	arg.mode = capsnap->mode;

	arg.inline_data = capsnap->inline_data;
	arg.flags = 0;

	return send_cap_msg(&arg);
}

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
			       struct ceph_mds_session *session)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_cap_snap *capsnap;
	u64 oldest_flush_tid = 0;
	u64 first_tid = 1, last_tid = 0;

	dout("__flush_snaps %p session %p\n", inode, session);

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/* should be removed by ceph_try_drop_cap_snap() */
		BUG_ON(!capsnap->need_flush);

		/* only flush each capsnap once */
		if (capsnap->cap_flush.tid > 0) {
			dout(" already flushed %p, skipping\n", capsnap);
			continue;
		}

		spin_lock(&mdsc->cap_dirty_lock);
		capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
		list_add_tail(&capsnap->cap_flush.g_list,
			      &mdsc->cap_flush_list);
		if (oldest_flush_tid == 0)
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		if (list_empty(&ci->i_flushing_item)) {
			list_add_tail(&ci->i_flushing_item,
				      &session->s_cap_flushing);
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		list_add_tail(&capsnap->cap_flush.i_list,
			      &ci->i_cap_flush_list);

		if (first_tid == 1)
			first_tid = capsnap->cap_flush.tid;
		last_tid = capsnap->cap_flush.tid;
	}

	ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;

	while (first_tid <= last_tid) {
		struct ceph_cap *cap = ci->i_auth_cap;
		struct ceph_cap_flush *cf;
		int ret;

		if (!(cap && cap->session == session)) {
			dout("__flush_snaps %p auth cap %p not mds%d, "
			     "stop\n", inode, cap, session->s_mds);
			break;
		}

		ret = -ENOENT;
		list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
			if (cf->tid >= first_tid) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;

		first_tid = cf->tid + 1;

		capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
		refcount_inc(&capsnap->nref);
		spin_unlock(&ci->i_ceph_lock);

		dout("__flush_snaps %p capsnap %p tid %llu %s\n",
		     inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));

		ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
					oldest_flush_tid);
		if (ret < 0) {
			pr_err("__flush_snaps: error sending cap flushsnap, "
			       "ino (%llx.%llx) tid %llu follows %llu\n",
			       ceph_vinop(inode), cf->tid, capsnap->follows);
		}

		ceph_put_cap_snap(capsnap);
		spin_lock(&ci->i_ceph_lock);
	}
}

void ceph_flush_snaps(struct ceph_inode_info *ci,
		      struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL;
	int mds;

	dout("ceph_flush_snaps %p\n", inode);
	if (psession)
		session = *psession;
retry:
	spin_lock(&ci->i_ceph_lock);
	if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
		dout(" no capsnap needs flush, doing nothing\n");
		goto out;
	}
	if (!ci->i_auth_cap) {
		dout(" no auth cap (migrating?), doing nothing\n");
		goto out;
	}

	mds = ci->i_auth_cap->session->s_mds;
	if (session && session->s_mds != mds) {
		dout(" oops, wrong session %p mutex\n", session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		session = NULL;
	}
	if (!session) {
		spin_unlock(&ci->i_ceph_lock);
		mutex_lock(&mdsc->mutex);
		session = __ceph_lookup_mds_session(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
		if (session) {
			dout(" inverting session/ino locks on %p\n", session);
			mutex_lock(&session->s_mutex);
		}
		goto retry;
	}

	// make sure flushsnap messages are sent in proper order.
	if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
		__kick_flushing_caps(mdsc, session, ci, 0);
		ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
	}

	__ceph_flush_snaps(ci, session);
out:
	spin_unlock(&ci->i_ceph_lock);

	if (psession) {
		*psession = session;
	} else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
			   struct ceph_cap_flush **pcf)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	if (!ci->i_auth_cap) {
		pr_warn("__mark_dirty_caps %p %llx mask %s, "
			"but no auth cap (session was closed?)\n",
			inode, ceph_ino(inode), ceph_cap_string(mask));
		return 0;
	}

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
		swap(ci->i_prealloc_cap_flush, *pcf);

		if (!ci->i_head_snapc) {
			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else {
		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}

struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
}

void ceph_free_cap_flush(struct ceph_cap_flush *cf)
{
	if (cf)
		kmem_cache_free(ceph_cap_flush_cachep, cf);
}

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		return cf->tid;
	}
	return 0;
}

/*
 * Remove cap_flush from the mdsc's or inode's flushing cap list.
 * Return true if caller needs to wake up flush waiters.
 */
static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci,
			       struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;

	if (mdsc) {
		/* are there older pending cap flushes? */
		if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
			prev = list_prev_entry(cf, g_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->g_list);
	} else if (ci) {
		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
			prev = list_prev_entry(cf, i_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->i_list);
	} else {
		BUG_ON(1);
	}
	return wake;
}
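
/*
 * Wake-ordering example (illustrative): if flush tids 5 and 7 are both
 * pending and tid 7 finishes first with cf->wake set, the wake flag is
 * handed down to tid 5 rather than waking the waiter immediately, so
 * the waiter only runs once every older flush has completed too.
 */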

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session, bool wake,
				u64 *flush_tid, u64 *oldest_flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *cf = NULL;
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));
	BUG_ON(!ci->i_prealloc_cap_flush);

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	swap(cf, ci->i_prealloc_cap_flush);
	cf->caps = flushing;
	cf->wake = wake;

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	cf->tid = ++mdsc->last_cap_flush_tid;
	list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);

	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	list_add_tail(&cf->i_list, &ci->i_cap_flush_list);

	*flush_tid = cf->tid;
	return flushing;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}

bool __ceph_should_report_size(struct ceph_inode_info *ci)
{
	loff_t size = ci->vfs_inode.i_size;

	/* mds will adjust max size according to the reported size */
	if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
		return false;
	if (size >= ci->i_max_size)
		return true;
	/* half of previous max_size increment has been used */
	if (ci->i_max_size > ci->i_reported_size &&
	    (size << 1) >= ci->i_max_size + ci->i_reported_size)
		return true;
	return false;
}
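
/*
 * Worked example (illustrative): with i_max_size == 8MB and
 * i_reported_size == 0, the half-increment rule above fires once
 * (size << 1) >= 8MB, i.e. at a 4MB file size, so we report the new
 * size early enough for the MDS to extend max_size before the writer
 * hits the limit.
 */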

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	u64 flush_tid, oldest_flush_tid;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;	/* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;	/* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int delayed = 0, sent = 0, num;
	bool is_delayed = flags & CHECK_CAPS_NODELAY;
	bool queue_invalidate = false;
	bool force_requeue = false;
	bool tried_invalidate = false;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	want = file_wanted;
	retain = file_wanted | used | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (file_wanted) {
			retain |= CEPH_CAP_ANY;	/* be greedy */
		} else if (S_ISDIR(inode->i_mode) &&
			   (issued & CEPH_CAP_FILE_SHARED) &&
			   __ceph_dir_is_complete(ci)) {
			/*
			 * If a directory is complete, we want to keep
			 * the exclusive cap, so that the MDS does not
			 * end up revoking the shared cap on every
			 * create/unlink operation.
			 */
			want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
			retain |= want;
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked; try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
	    !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&   /* no dirty pages... */
	    inode->i_data.nrpages &&		/* have cached pages */
	    (revoking & (CEPH_CAP_FILE_CACHE|
			 CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = true;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = true;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap_used),
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if (__ceph_should_report_size(ci)) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap) {
			if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
				dout("flushing dirty caps\n");
				goto ack;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
				dout("flushing snap caps\n");
				goto ack;
			}
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;	/* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}

		/* kick flushing and flush snaps before sending normal
		 * cap message */
		if (cap == ci->i_auth_cap &&
		    (ci->i_ceph_flags &
		     (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
			if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
				__kick_flushing_caps(mdsc, session, ci, 0);
				ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
				__ceph_flush_snaps(ci, session);

			goto retry_locked;
		}

		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
			flushing = __mark_caps_flushing(inode, session, false,
							&flush_tid,
							&oldest_flush_tid);
		} else {
			flushing = 0;
			flush_tid = 0;
			spin_lock(&mdsc->cap_dirty_lock);
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
			spin_unlock(&mdsc->cap_dirty_lock);
		}

		mds = cap->mds;	/* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, false,
				      cap_used, want, retain, flushing,
				      flush_tid, oldest_flush_tid);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = true;	/* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}

/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode, u64 *ptid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_session *session = NULL;
	int flushing = 0;
	u64 flush_tid = 0, oldest_flush_tid = 0;

retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
2001 int want = __ceph_caps_wanted(ci);
2002 int delayed;
2003
2004 if (!session || session != cap->session) {
2005 spin_unlock(&ci->i_ceph_lock);
2006 if (session)
2007 mutex_unlock(&session->s_mutex);
2008 session = cap->session;
2009 mutex_lock(&session->s_mutex);
2010 goto retry;
2011 }
2012 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
2013 goto out;
2014
2015 flushing = __mark_caps_flushing(inode, session, true,
2016 &flush_tid, &oldest_flush_tid);
2017
2018 /* __send_cap drops i_ceph_lock */
2019 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, true,
2020 used, want, (cap->issued | cap->implemented),
2021 flushing, flush_tid, oldest_flush_tid);
2022
2023 if (delayed) {
2024 spin_lock(&ci->i_ceph_lock);
2025 __cap_delay_requeue(mdsc, ci);
2026 spin_unlock(&ci->i_ceph_lock);
2027 }
2028 } else {
2029 if (!list_empty(&ci->i_cap_flush_list)) {
2030 struct ceph_cap_flush *cf =
2031 list_last_entry(&ci->i_cap_flush_list,
2032 struct ceph_cap_flush, i_list);
2033 cf->wake = true;
2034 flush_tid = cf->tid;
2035 }
2036 flushing = ci->i_flushing_caps;
2037 spin_unlock(&ci->i_ceph_lock);
2038 }
2039 out:
2040 if (session)
2041 mutex_unlock(&session->s_mutex);
2042
2043 *ptid = flush_tid;
2044 return flushing;
2045 }
2046
2047 /*
2048 * Return true if we've flushed caps through the given flush_tid.
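* The i_cap_flush_list is kept in tid order and entries are removed
* as their flushes are acked, so checking the oldest remaining entry
* is sufficient.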
2049 */
2050 static int caps_are_flushed(struct inode *inode, u64 flush_tid)
2051 {
2052 struct ceph_inode_info *ci = ceph_inode(inode);
2053 int ret = 1;
2054
2055 spin_lock(&ci->i_ceph_lock);
2056 if (!list_empty(&ci->i_cap_flush_list)) {
2057 struct ceph_cap_flush *cf =
2058 list_first_entry(&ci->i_cap_flush_list,
2059 struct ceph_cap_flush, i_list);
2060 if (cf->tid <= flush_tid)
2061 ret = 0;
2062 }
2063 spin_unlock(&ci->i_ceph_lock);
2064 return ret;
2065 }
2066
2067 /*
2068 * wait for any unsafe requests to complete.
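* We wait only on the most recent unsafe dir op and the most recent
* op targeting this inode; the assumption is that the MDS commits
* requests in order, so once the newest is safe the older ones are too.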
2069 */
2070 static int unsafe_request_wait(struct inode *inode)
2071 {
2072 struct ceph_inode_info *ci = ceph_inode(inode);
2073 struct ceph_mds_request *req1 = NULL, *req2 = NULL;
2074 int ret, err = 0;
2075
2076 spin_lock(&ci->i_unsafe_lock);
2077 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
2078 req1 = list_last_entry(&ci->i_unsafe_dirops,
2079 struct ceph_mds_request,
2080 r_unsafe_dir_item);
2081 ceph_mdsc_get_request(req1);
2082 }
2083 if (!list_empty(&ci->i_unsafe_iops)) {
2084 req2 = list_last_entry(&ci->i_unsafe_iops,
2085 struct ceph_mds_request,
2086 r_unsafe_target_item);
2087 ceph_mdsc_get_request(req2);
2088 }
2089 spin_unlock(&ci->i_unsafe_lock);
2090
2091 dout("unsafe_request_wait %p wait on tid %llu %llu\n",
2092 inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
2093 if (req1) {
2094 ret = !wait_for_completion_timeout(&req1->r_safe_completion,
2095 ceph_timeout_jiffies(req1->r_timeout));
2096 if (ret)
2097 err = -EIO;
2098 ceph_mdsc_put_request(req1);
2099 }
2100 if (req2) {
2101 ret = !wait_for_completion_timeout(&req2->r_safe_completion,
2102 ceph_timeout_jiffies(req2->r_timeout));
2103 if (ret)
2104 err = -EIO;
2105 ceph_mdsc_put_request(req2);
2106 }
2107 return err;
2108 }
2109
2110 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2111 {
2112 struct inode *inode = file->f_mapping->host;
2113 struct ceph_inode_info *ci = ceph_inode(inode);
2114 u64 flush_tid;
2115 int ret;
2116 int dirty;
2117
2118 dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
2119
2120 ret = file_write_and_wait_range(file, start, end);
2121 if (ret < 0)
2122 goto out;
2123
2124 if (datasync)
2125 goto out;
2126
2127 inode_lock(inode);
2128
2129 dirty = try_flush_caps(inode, &flush_tid);
2130 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
2131
2132 ret = unsafe_request_wait(inode);
2133
2134 /*
2135 * only wait on non-file metadata writeback (the mds
2136 * can recover size and mtime, so we don't need to
2137 * wait for that)
2138 */
2139 if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
2140 ret = wait_event_interruptible(ci->i_cap_wq,
2141 caps_are_flushed(inode, flush_tid));
2142 }
2143 inode_unlock(inode);
2144 out:
2145 dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
2146 return ret;
2147 }
2148
2149 /*
2150 * Flush any dirty caps back to the mds. If we aren't asked to wait,
2151 * queue inode for flush but don't do so immediately, because we can
2152 * get by with fewer MDS messages if we wait for data writeback to
2153 * complete first.
2154 */
2155 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
2156 {
2157 struct ceph_inode_info *ci = ceph_inode(inode);
2158 u64 flush_tid;
2159 int err = 0;
2160 int dirty;
2161 int wait = wbc->sync_mode == WB_SYNC_ALL;
2162
2163 dout("write_inode %p wait=%d\n", inode, wait);
2164 if (wait) {
2165 dirty = try_flush_caps(inode, &flush_tid);
2166 if (dirty)
2167 err = wait_event_interruptible(ci->i_cap_wq,
2168 caps_are_flushed(inode, flush_tid));
2169 } else {
2170 struct ceph_mds_client *mdsc =
2171 ceph_sb_to_client(inode->i_sb)->mdsc;
2172
2173 spin_lock(&ci->i_ceph_lock);
2174 if (__ceph_caps_dirty(ci))
2175 __cap_delay_requeue_front(mdsc, ci);
2176 spin_unlock(&ci->i_ceph_lock);
2177 }
2178 return err;
2179 }
2180
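/*
 * Re-send any not-yet-acked cap flushes (and snap flushes) for this
 * inode to the given session, oldest tid first. Called and returns
 * with i_ceph_lock held, but may drop and retake it while sending.
 */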
2181 static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
2182 struct ceph_mds_session *session,
2183 struct ceph_inode_info *ci,
2184 u64 oldest_flush_tid)
2185 __releases(ci->i_ceph_lock)
2186 __acquires(ci->i_ceph_lock)
2187 {
2188 struct inode *inode = &ci->vfs_inode;
2189 struct ceph_cap *cap;
2190 struct ceph_cap_flush *cf;
2191 int ret;
2192 u64 first_tid = 0;
2193
2194 list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
2195 if (cf->tid < first_tid)
2196 continue;
2197
2198 cap = ci->i_auth_cap;
2199 if (!(cap && cap->session == session)) {
2200 pr_err("%p auth cap %p not mds%d ???\n",
2201 inode, cap, session->s_mds);
2202 break;
2203 }
2204
2205 first_tid = cf->tid + 1;
2206
2207 if (cf->caps) {
2208 dout("kick_flushing_caps %p cap %p tid %llu %s\n",
2209 inode, cap, cf->tid, ceph_cap_string(cf->caps));
2210 ci->i_ceph_flags |= CEPH_I_NODELAY;
2211 ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
2212 false, __ceph_caps_used(ci),
2213 __ceph_caps_wanted(ci),
2214 cap->issued | cap->implemented,
2215 cf->caps, cf->tid, oldest_flush_tid);
2216 if (ret) {
2217 pr_err("kick_flushing_caps: error sending "
2218 "cap flush, ino (%llx.%llx) "
2219 "tid %llu flushing %s\n",
2220 ceph_vinop(inode), cf->tid,
2221 ceph_cap_string(cf->caps));
2222 }
2223 } else {
2224 struct ceph_cap_snap *capsnap =
2225 container_of(cf, struct ceph_cap_snap,
2226 cap_flush);
2227 dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
2228 inode, capsnap, cf->tid,
2229 ceph_cap_string(capsnap->dirty));
2230
2231 refcount_inc(&capsnap->nref);
2232 spin_unlock(&ci->i_ceph_lock);
2233
2234 ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
2235 oldest_flush_tid);
2236 if (ret < 0) {
2237 pr_err("kick_flushing_caps: error sending "
2238 "cap flushsnap, ino (%llx.%llx) "
2239 "tid %llu follows %llu\n",
2240 ceph_vinop(inode), cf->tid,
2241 capsnap->follows);
2242 }
2243
2244 ceph_put_cap_snap(capsnap);
2245 }
2246
2247 spin_lock(&ci->i_ceph_lock);
2248 }
2249 }
2250
2251 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2252 struct ceph_mds_session *session)
2253 {
2254 struct ceph_inode_info *ci;
2255 struct ceph_cap *cap;
2256 u64 oldest_flush_tid;
2257
2258 dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2259
2260 spin_lock(&mdsc->cap_dirty_lock);
2261 oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2262 spin_unlock(&mdsc->cap_dirty_lock);
2263
2264 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2265 spin_lock(&ci->i_ceph_lock);
2266 cap = ci->i_auth_cap;
2267 if (!(cap && cap->session == session)) {
2268 pr_err("%p auth cap %p not mds%d ???\n",
2269 &ci->vfs_inode, cap, session->s_mds);
2270 spin_unlock(&ci->i_ceph_lock);
2271 continue;
2272 }
2273
2274
2275 /*
2276 * if flushing caps were revoked, we re-send the cap flush
2277 * in the client reconnect stage. This guarantees that the MDS
2278 * processes the cap flush message before issuing the flushing
2279 * caps to another client.
2280 */
2281 if ((cap->issued & ci->i_flushing_caps) !=
2282 ci->i_flushing_caps) {
2283 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
2284 __kick_flushing_caps(mdsc, session, ci,
2285 oldest_flush_tid);
2286 } else {
2287 ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
2288 }
2289
2290 spin_unlock(&ci->i_ceph_lock);
2291 }
2292 }
2293
2294 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2295 struct ceph_mds_session *session)
2296 {
2297 struct ceph_inode_info *ci;
2298 struct ceph_cap *cap;
2299 u64 oldest_flush_tid;
2300
2301 dout("kick_flushing_caps mds%d\n", session->s_mds);
2302
2303 spin_lock(&mdsc->cap_dirty_lock);
2304 oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2305 spin_unlock(&mdsc->cap_dirty_lock);
2306
2307 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2308 spin_lock(&ci->i_ceph_lock);
2309 cap = ci->i_auth_cap;
2310 if (!(cap && cap->session == session)) {
2311 pr_err("%p auth cap %p not mds%d ???\n",
2312 &ci->vfs_inode, cap, session->s_mds);
2313 spin_unlock(&ci->i_ceph_lock);
2314 continue;
2315 }
2316 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
2317 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
2318 __kick_flushing_caps(mdsc, session, ci,
2319 oldest_flush_tid);
2320 }
2321 spin_unlock(&ci->i_ceph_lock);
2322 }
2323 }
2324
2325 static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2326 struct ceph_mds_session *session,
2327 struct inode *inode)
2328 __releases(ci->i_ceph_lock)
2329 {
2330 struct ceph_inode_info *ci = ceph_inode(inode);
2331 struct ceph_cap *cap;
2332
2333 cap = ci->i_auth_cap;
2334 dout("kick_flushing_inode_caps %p flushing %s\n", inode,
2335 ceph_cap_string(ci->i_flushing_caps));
2336
2337 if (!list_empty(&ci->i_cap_flush_list)) {
2338 u64 oldest_flush_tid;
2339 spin_lock(&mdsc->cap_dirty_lock);
2340 list_move_tail(&ci->i_flushing_item,
2341 &cap->session->s_cap_flushing);
2342 oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2343 spin_unlock(&mdsc->cap_dirty_lock);
2344
2345 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
2346 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
2347 spin_unlock(&ci->i_ceph_lock);
2348 } else {
2349 spin_unlock(&ci->i_ceph_lock);
2350 }
2351 }
2352
2353
2354 /*
2355 * Take references to capabilities we hold, so that we don't release
2356 * them to the MDS prematurely.
2357 *
2358 * Protected by i_ceph_lock.
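*
* Each cap bit maps to its own ref counter: PIN -> i_pin_ref,
* FILE_RD -> i_rd_ref, FILE_CACHE -> i_rdcache_ref,
* FILE_WR -> i_wr_ref, FILE_BUFFER -> i_wb_ref.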
2359 */
2360 static void __take_cap_refs(struct ceph_inode_info *ci, int got,
2361 bool snap_rwsem_locked)
2362 {
2363 if (got & CEPH_CAP_PIN)
2364 ci->i_pin_ref++;
2365 if (got & CEPH_CAP_FILE_RD)
2366 ci->i_rd_ref++;
2367 if (got & CEPH_CAP_FILE_CACHE)
2368 ci->i_rdcache_ref++;
2369 if (got & CEPH_CAP_FILE_WR) {
2370 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
2371 BUG_ON(!snap_rwsem_locked);
2372 ci->i_head_snapc = ceph_get_snap_context(
2373 ci->i_snap_realm->cached_context);
2374 }
2375 ci->i_wr_ref++;
2376 }
2377 if (got & CEPH_CAP_FILE_BUFFER) {
2378 if (ci->i_wb_ref == 0)
2379 ihold(&ci->vfs_inode);
2380 ci->i_wb_ref++;
2381 dout("__take_cap_refs %p wb %d -> %d (?)\n",
2382 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2383 }
2384 }
2385
2386 /*
2387 * Try to grab cap references. Specify those refs we @want, and the
2388 * minimal set we @need. Also include the larger offset we are writing
2389 * to (when applicable), and check against max_size here as well.
2390 * Note that caller is responsible for ensuring max_size increases are
2391 * requested from the MDS.
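*
* Returns 0 if the caps are not immediately available (the caller
* should wait or retry); returns 1 with *got filled in on success,
* or with *err set if the attempt failed outright.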
2392 */
2393 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2394 loff_t endoff, bool nonblock, int *got, int *err)
2395 {
2396 struct inode *inode = &ci->vfs_inode;
2397 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2398 int ret = 0;
2399 int have, implemented;
2400 int file_wanted;
2401 bool snap_rwsem_locked = false;
2402
2403 dout("get_cap_refs %p need %s want %s\n", inode,
2404 ceph_cap_string(need), ceph_cap_string(want));
2405
2406 again:
2407 spin_lock(&ci->i_ceph_lock);
2408
2409 /* make sure file is actually open */
2410 file_wanted = __ceph_caps_file_wanted(ci);
2411 if ((file_wanted & need) != need) {
2412 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2413 ceph_cap_string(need), ceph_cap_string(file_wanted));
2414 *err = -EBADF;
2415 ret = 1;
2416 goto out_unlock;
2417 }
2418
2419 /* finish pending truncate */
2420 while (ci->i_truncate_pending) {
2421 spin_unlock(&ci->i_ceph_lock);
2422 if (snap_rwsem_locked) {
2423 up_read(&mdsc->snap_rwsem);
2424 snap_rwsem_locked = false;
2425 }
2426 __ceph_do_pending_vmtruncate(inode);
2427 spin_lock(&ci->i_ceph_lock);
2428 }
2429
2430 have = __ceph_caps_issued(ci, &implemented);
2431
2432 if (have & need & CEPH_CAP_FILE_WR) {
2433 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2434 dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2435 inode, endoff, ci->i_max_size);
2436 if (endoff > ci->i_requested_max_size) {
2437 *err = -EAGAIN;
2438 ret = 1;
2439 }
2440 goto out_unlock;
2441 }
2442 /*
2443 * If a sync write is in progress, we must wait, so that we
2444 * can get a final snapshot value for size+mtime.
2445 */
2446 if (__ceph_have_pending_cap_snap(ci)) {
2447 dout("get_cap_refs %p cap_snap_pending\n", inode);
2448 goto out_unlock;
2449 }
2450 }
2451
2452 if ((have & need) == need) {
2453 /*
2454 * Look at (implemented & ~have & not) so that we keep waiting
2455 * on transition from wanted -> needed caps. This is needed
2456 * for WRBUFFER|WR -> WR, to avoid a new WR sync write
2457 * going out ahead of a prior buffered writeback.
2458 */
2459 int not = want & ~(have & need);
2460 int revoking = implemented & ~have;
2461 dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2462 inode, ceph_cap_string(have), ceph_cap_string(not),
2463 ceph_cap_string(revoking));
2464 if ((revoking & not) == 0) {
2465 if (!snap_rwsem_locked &&
2466 !ci->i_head_snapc &&
2467 (need & CEPH_CAP_FILE_WR)) {
2468 if (!down_read_trylock(&mdsc->snap_rwsem)) {
2469 /*
2470 * we cannot call down_read() when the
2471 * task isn't in the TASK_RUNNING state
2472 */
2473 if (nonblock) {
2474 *err = -EAGAIN;
2475 ret = 1;
2476 goto out_unlock;
2477 }
2478
2479 spin_unlock(&ci->i_ceph_lock);
2480 down_read(&mdsc->snap_rwsem);
2481 snap_rwsem_locked = true;
2482 goto again;
2483 }
2484 snap_rwsem_locked = true;
2485 }
2486 *got = need | (have & want);
2487 if ((need & CEPH_CAP_FILE_RD) &&
2488 !(*got & CEPH_CAP_FILE_CACHE))
2489 ceph_disable_fscache_readpage(ci);
2490 __take_cap_refs(ci, *got, true);
2491 ret = 1;
2492 }
2493 } else {
2494 int session_readonly = false;
2495 if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
2496 struct ceph_mds_session *s = ci->i_auth_cap->session;
2497 spin_lock(&s->s_cap_lock);
2498 session_readonly = s->s_readonly;
2499 spin_unlock(&s->s_cap_lock);
2500 }
2501 if (session_readonly) {
2502 dout("get_cap_refs %p needed %s but mds%d readonly\n",
2503 inode, ceph_cap_string(need), ci->i_auth_cap->mds);
2504 *err = -EROFS;
2505 ret = 1;
2506 goto out_unlock;
2507 }
2508
2509 if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
2510 int mds_wanted;
2511 if (READ_ONCE(mdsc->fsc->mount_state) ==
2512 CEPH_MOUNT_SHUTDOWN) {
2513 dout("get_cap_refs %p forced umount\n", inode);
2514 *err = -EIO;
2515 ret = 1;
2516 goto out_unlock;
2517 }
2518 mds_wanted = __ceph_caps_mds_wanted(ci, false);
2519 if (need & ~(mds_wanted & need)) {
2520 dout("get_cap_refs %p caps were dropped"
2521 " (session killed?)\n", inode);
2522 *err = -ESTALE;
2523 ret = 1;
2524 goto out_unlock;
2525 }
2526 if (!(file_wanted & ~mds_wanted))
2527 ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
2528 }
2529
2530 dout("get_cap_refs %p have %s needed %s\n", inode,
2531 ceph_cap_string(have), ceph_cap_string(need));
2532 }
2533 out_unlock:
2534 spin_unlock(&ci->i_ceph_lock);
2535 if (snap_rwsem_locked)
2536 up_read(&mdsc->snap_rwsem);
2537
2538 dout("get_cap_refs %p ret %d got %s\n", inode,
2539 ret, ceph_cap_string(*got));
2540 return ret;
2541 }
2542
2543 /*
2544 * Check the offset we are writing up to against our current
2545 * max_size. If necessary, tell the MDS we want to write to
2546 * a larger offset.
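*
* For example (illustrative numbers): if i_max_size is 4MB and a
* write extends to offset 6MB, record i_wanted_max_size = 6MB and,
* if we hold the auth cap with FILE_WR and haven't already requested
* that much, ask the MDS via ceph_check_caps(CHECK_CAPS_AUTHONLY).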
2547 */
2548 static void check_max_size(struct inode *inode, loff_t endoff)
2549 {
2550 struct ceph_inode_info *ci = ceph_inode(inode);
2551 int check = 0;
2552
2553 /* do we need to explicitly request a larger max_size? */
2554 spin_lock(&ci->i_ceph_lock);
2555 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2556 dout("write %p at large endoff %llu, req max_size\n",
2557 inode, endoff);
2558 ci->i_wanted_max_size = endoff;
2559 }
2560 /* duplicate ceph_check_caps()'s logic */
2561 if (ci->i_auth_cap &&
2562 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2563 ci->i_wanted_max_size > ci->i_max_size &&
2564 ci->i_wanted_max_size > ci->i_requested_max_size)
2565 check = 1;
2566 spin_unlock(&ci->i_ceph_lock);
2567 if (check)
2568 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2569 }
2570
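/*
 * Non-blocking attempt to get read caps, for callers that must not
 * sleep: returns 1 and fills in *got on success, 0 if the caps
 * aren't immediately available, or a negative error code.
 */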
2571 int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
2572 {
2573 int ret, err = 0;
2574
2575 BUG_ON(need & ~CEPH_CAP_FILE_RD);
2576 BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
2577 ret = ceph_pool_perm_check(ci, need);
2578 if (ret < 0)
2579 return ret;
2580
2581 ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
2582 if (ret) {
2583 if (err == -EAGAIN) {
2584 ret = 0;
2585 } else if (err < 0) {
2586 ret = err;
2587 }
2588 }
2589 return ret;
2590 }
2591
2592 /*
2593 * Wait for caps, and take cap references. If we can't get a WR cap
2594 * due to a small max_size, make sure we check_max_size (and possibly
2595 * ask the mds) so we don't get hung up indefinitely.
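*
* A hedged caller-side sketch (illustrative only, not an excerpt
* from the actual read/write paths):
*
*	int got = 0, err;
*	struct page *pinned_page = NULL;
*
*	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
*			    pos + count, &got, &pinned_page);
*	if (err < 0)
*		return err;
*	... do the buffered write ...
*	if (pinned_page)
*		put_page(pinned_page);
*	ceph_put_cap_refs(ci, got);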
2596 */
2597 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2598 loff_t endoff, int *got, struct page **pinned_page)
2599 {
2600 int _got, ret, err = 0;
2601
2602 ret = ceph_pool_perm_check(ci, need);
2603 if (ret < 0)
2604 return ret;
2605
2606 while (true) {
2607 if (endoff > 0)
2608 check_max_size(&ci->vfs_inode, endoff);
2609
2610 err = 0;
2611 _got = 0;
2612 ret = try_get_cap_refs(ci, need, want, endoff,
2613 false, &_got, &err);
2614 if (ret) {
2615 if (err == -EAGAIN)
2616 continue;
2617 if (err < 0)
2618 ret = err;
2619 } else {
2620 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2621 add_wait_queue(&ci->i_cap_wq, &wait);
2622
2623 while (!try_get_cap_refs(ci, need, want, endoff,
2624 true, &_got, &err)) {
2625 if (signal_pending(current)) {
2626 ret = -ERESTARTSYS;
2627 break;
2628 }
2629 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2630 }
2631
2632 remove_wait_queue(&ci->i_cap_wq, &wait);
2633
2634 if (err == -EAGAIN)
2635 continue;
2636 if (err < 0)
2637 ret = err;
2638 }
2639 if (ret < 0) {
2640 if (err == -ESTALE) {
2641 /* session was killed, try renew caps */
2642 ret = ceph_renew_caps(&ci->vfs_inode);
2643 if (ret == 0)
2644 continue;
2645 }
2646 return ret;
2647 }
2648
2649 if (ci->i_inline_version != CEPH_INLINE_NONE &&
2650 (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
2651 i_size_read(&ci->vfs_inode) > 0) {
2652 struct page *page =
2653 find_get_page(ci->vfs_inode.i_mapping, 0);
2654 if (page) {
2655 if (PageUptodate(page)) {
2656 *pinned_page = page;
2657 break;
2658 }
2659 put_page(page);
2660 }
2661 /*
2662 * drop cap refs first, because a getattr while
2663 * holding cap refs can cause deadlock.
2664 */
2665 ceph_put_cap_refs(ci, _got);
2666 _got = 0;
2667
2668 /*
2669 * getattr request will bring inline data into
2670 * page cache
2671 */
2672 ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
2673 CEPH_STAT_CAP_INLINE_DATA,
2674 true);
2675 if (ret < 0)
2676 return ret;
2677 continue;
2678 }
2679 break;
2680 }
2681
2682 if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
2683 ceph_fscache_revalidate_cookie(ci);
2684
2685 *got = _got;
2686 return 0;
2687 }
2688
2689 /*
2690 * Take cap refs. Caller must already know we hold at least one ref
2691 * on the caps in question or we don't know this is safe.
2692 */
2693 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2694 {
2695 spin_lock(&ci->i_ceph_lock);
2696 __take_cap_refs(ci, caps, false);
2697 spin_unlock(&ci->i_ceph_lock);
2698 }
2699
2700
2701 /*
2702 * Drop a cap_snap that is not associated with any snapshot.
2703 * We don't need to send a FLUSHSNAP message for it.
2704 */
2705 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
2706 struct ceph_cap_snap *capsnap)
2707 {
2708 if (!capsnap->need_flush &&
2709 !capsnap->writing && !capsnap->dirty_pages) {
2710 dout("dropping cap_snap %p follows %llu\n",
2711 capsnap, capsnap->follows);
2712 BUG_ON(capsnap->cap_flush.tid > 0);
2713 ceph_put_snap_context(capsnap->context);
2714 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
2715 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
2716
2717 list_del(&capsnap->ci_item);
2718 ceph_put_cap_snap(capsnap);
2719 return 1;
2720 }
2721 return 0;
2722 }
2723
2724 /*
2725 * Release cap refs.
2726 *
2727 * If we released the last ref on any given cap, call ceph_check_caps
2728 * to release (or schedule a release).
2729 *
2730 * If we are releasing a WR cap (from a sync write), finalize any affected
2731 * cap_snap, and wake up any waiters.
2732 */
2733 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2734 {
2735 struct inode *inode = &ci->vfs_inode;
2736 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2737
2738 spin_lock(&ci->i_ceph_lock);
2739 if (had & CEPH_CAP_PIN)
2740 --ci->i_pin_ref;
2741 if (had & CEPH_CAP_FILE_RD)
2742 if (--ci->i_rd_ref == 0)
2743 last++;
2744 if (had & CEPH_CAP_FILE_CACHE)
2745 if (--ci->i_rdcache_ref == 0)
2746 last++;
2747 if (had & CEPH_CAP_FILE_BUFFER) {
2748 if (--ci->i_wb_ref == 0) {
2749 last++;
2750 put++;
2751 }
2752 dout("put_cap_refs %p wb %d -> %d (?)\n",
2753 inode, ci->i_wb_ref+1, ci->i_wb_ref);
2754 }
2755 if (had & CEPH_CAP_FILE_WR)
2756 if (--ci->i_wr_ref == 0) {
2757 last++;
2758 if (__ceph_have_pending_cap_snap(ci)) {
2759 struct ceph_cap_snap *capsnap =
2760 list_last_entry(&ci->i_cap_snaps,
2761 struct ceph_cap_snap,
2762 ci_item);
2763 capsnap->writing = 0;
2764 if (ceph_try_drop_cap_snap(ci, capsnap))
2765 put++;
2766 else if (__ceph_finish_cap_snap(ci, capsnap))
2767 flushsnaps = 1;
2768 wake = 1;
2769 }
2770 if (ci->i_wrbuffer_ref_head == 0 &&
2771 ci->i_dirty_caps == 0 &&
2772 ci->i_flushing_caps == 0) {
2773 BUG_ON(!ci->i_head_snapc);
2774 ceph_put_snap_context(ci->i_head_snapc);
2775 ci->i_head_snapc = NULL;
2776 }
2777 /* see comment in __ceph_remove_cap() */
2778 if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
2779 drop_inode_snap_realm(ci);
2780 }
2781 spin_unlock(&ci->i_ceph_lock);
2782
2783 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2784 last ? " last" : "", put ? " put" : "");
2785
2786 if (last && !flushsnaps)
2787 ceph_check_caps(ci, 0, NULL);
2788 else if (flushsnaps)
2789 ceph_flush_snaps(ci, NULL);
2790 if (wake)
2791 wake_up_all(&ci->i_cap_wq);
2792 while (put-- > 0)
2793 iput(inode);
2794 }
2795
2796 /*
2797 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2798 * context. Adjust per-snap dirty page accounting as appropriate.
2799 * Once all dirty data for a cap_snap is flushed, flush snapped file
2800 * metadata back to the MDS. If we dropped the last ref, call
2801 * ceph_check_caps.
2802 */
2803 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2804 struct ceph_snap_context *snapc)
2805 {
2806 struct inode *inode = &ci->vfs_inode;
2807 struct ceph_cap_snap *capsnap = NULL;
2808 int put = 0;
2809 bool last = false;
2810 bool found = false;
2811 bool flush_snaps = false;
2812 bool complete_capsnap = false;
2813
2814 spin_lock(&ci->i_ceph_lock);
2815 ci->i_wrbuffer_ref -= nr;
2816 if (ci->i_wrbuffer_ref == 0) {
2817 last = true;
2818 put++;
2819 }
2820
2821 if (ci->i_head_snapc == snapc) {
2822 ci->i_wrbuffer_ref_head -= nr;
2823 if (ci->i_wrbuffer_ref_head == 0 &&
2824 ci->i_wr_ref == 0 &&
2825 ci->i_dirty_caps == 0 &&
2826 ci->i_flushing_caps == 0) {
2827 BUG_ON(!ci->i_head_snapc);
2828 ceph_put_snap_context(ci->i_head_snapc);
2829 ci->i_head_snapc = NULL;
2830 }
2831 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2832 inode,
2833 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2834 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2835 last ? " LAST" : "");
2836 } else {
2837 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2838 if (capsnap->context == snapc) {
2839 found = true;
2840 break;
2841 }
2842 }
2843 BUG_ON(!found);
2844 capsnap->dirty_pages -= nr;
2845 if (capsnap->dirty_pages == 0) {
2846 complete_capsnap = true;
2847 if (!capsnap->writing) {
2848 if (ceph_try_drop_cap_snap(ci, capsnap)) {
2849 put++;
2850 } else {
2851 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
2852 flush_snaps = true;
2853 }
2854 }
2855 }
2856 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2857 " snap %lld %d/%d -> %d/%d %s%s\n",
2858 inode, capsnap, capsnap->context->seq,
2859 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2860 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2861 last ? " (wrbuffer last)" : "",
2862 complete_capsnap ? " (complete capsnap)" : "");
2863 }
2864
2865 spin_unlock(&ci->i_ceph_lock);
2866
2867 if (last) {
2868 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2869 } else if (flush_snaps) {
2870 ceph_flush_snaps(ci, NULL);
2871 }
2872 if (complete_capsnap)
2873 wake_up_all(&ci->i_cap_wq);
2874 while (put-- > 0)
2875 iput(inode);
2876 }
2877
2878 /*
2879 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
2880 */
2881 static void invalidate_aliases(struct inode *inode)
2882 {
2883 struct dentry *dn, *prev = NULL;
2884
2885 dout("invalidate_aliases inode %p\n", inode);
2886 d_prune_aliases(inode);
2887 /*
2888 * For a non-directory inode, d_find_alias() only returns a
2889 * hashed dentry. After calling d_invalidate(), the
2890 * dentry becomes unhashed.
2891 *
2892 * For a directory inode, d_find_alias() can return an
2893 * unhashed dentry. But a directory inode should have
2894 * one alias at most.
2895 */
2896 while ((dn = d_find_alias(inode))) {
2897 if (dn == prev) {
2898 dput(dn);
2899 break;
2900 }
2901 d_invalidate(dn);
2902 if (prev)
2903 dput(prev);
2904 prev = dn;
2905 }
2906 if (prev)
2907 dput(prev);
2908 }
2909
2910 /*
2911 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2912 * actually be a revocation if it specifies a smaller cap set.)
2913 *
2914 * caller holds s_mutex and i_ceph_lock, we drop both.
2915 */
2916 static void handle_cap_grant(struct ceph_mds_client *mdsc,
2917 struct inode *inode, struct ceph_mds_caps *grant,
2918 struct ceph_string **pns, u64 inline_version,
2919 void *inline_data, u32 inline_len,
2920 struct ceph_buffer *xattr_buf,
2921 struct ceph_mds_session *session,
2922 struct ceph_cap *cap, int issued)
2923 __releases(ci->i_ceph_lock)
2924 __releases(mdsc->snap_rwsem)
2925 {
2926 struct ceph_inode_info *ci = ceph_inode(inode);
2927 int mds = session->s_mds;
2928 int seq = le32_to_cpu(grant->seq);
2929 int newcaps = le32_to_cpu(grant->caps);
2930 int used, wanted, dirty;
2931 u64 size = le64_to_cpu(grant->size);
2932 u64 max_size = le64_to_cpu(grant->max_size);
2933 struct timespec mtime, atime, ctime;
2934 int check_caps = 0;
2935 bool wake = false;
2936 bool writeback = false;
2937 bool queue_trunc = false;
2938 bool queue_invalidate = false;
2939 bool deleted_inode = false;
2940 bool fill_inline = false;
2941
2942 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2943 inode, cap, mds, seq, ceph_cap_string(newcaps));
2944 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2945 inode->i_size);
2946
2947
2948 /*
2949 * The auth MDS of the inode changed. We received the cap export
2950 * message, but still haven't received the cap import message.
2951 * handle_cap_export() updated the new auth MDS's cap.
2952 *
2953 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
2954 * that was sent before the cap import message. So don't remove caps.
2955 */
2956 if (ceph_seq_cmp(seq, cap->seq) <= 0) {
2957 WARN_ON(cap != ci->i_auth_cap);
2958 WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
2959 seq = cap->seq;
2960 newcaps |= cap->issued;
2961 }
2962
2963 /*
2964 * If CACHE is being revoked, and we have no dirty buffers,
2965 * try to invalidate (once). (If there are dirty buffers, we
2966 * will invalidate _after_ writeback.)
2967 */
2968 if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
2969 ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2970 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2971 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
2972 if (try_nonblocking_invalidate(inode)) {
2973 /* there were locked pages... invalidate later
2974 in a separate thread. */
2975 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2976 queue_invalidate = true;
2977 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2978 }
2979 }
2980 }
2981
2982 /* side effects now are allowed */
2983 cap->cap_gen = session->s_cap_gen;
2984 cap->seq = seq;
2985
2986 __check_cap_issue(ci, cap, newcaps);
2987
2988 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2989 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
2990 inode->i_mode = le32_to_cpu(grant->mode);
2991 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2992 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2993 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2994 from_kuid(&init_user_ns, inode->i_uid),
2995 from_kgid(&init_user_ns, inode->i_gid));
2996 }
2997
2998 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2999 (issued & CEPH_CAP_LINK_EXCL) == 0) {
3000 set_nlink(inode, le32_to_cpu(grant->nlink));
3001 if (inode->i_nlink == 0 &&
3002 (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
3003 deleted_inode = true;
3004 }
3005
3006 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
3007 int len = le32_to_cpu(grant->xattr_len);
3008 u64 version = le64_to_cpu(grant->xattr_version);
3009
3010 if (version > ci->i_xattrs.version) {
3011 dout(" got new xattrs v%llu on %p len %d\n",
3012 version, inode, len);
3013 if (ci->i_xattrs.blob)
3014 ceph_buffer_put(ci->i_xattrs.blob);
3015 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
3016 ci->i_xattrs.version = version;
3017 ceph_forget_all_cached_acls(inode);
3018 }
3019 }
3020
3021 if (newcaps & CEPH_CAP_ANY_RD) {
3022 /* ctime/mtime/atime? */
3023 ceph_decode_timespec(&mtime, &grant->mtime);
3024 ceph_decode_timespec(&atime, &grant->atime);
3025 ceph_decode_timespec(&ctime, &grant->ctime);
3026 ceph_fill_file_time(inode, issued,
3027 le32_to_cpu(grant->time_warp_seq),
3028 &ctime, &mtime, &atime);
3029 }
3030
3031 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
3032 /* file layout may have changed */
3033 s64 old_pool = ci->i_layout.pool_id;
3034 struct ceph_string *old_ns;
3035
3036 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
3037 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
3038 lockdep_is_held(&ci->i_ceph_lock));
3039 rcu_assign_pointer(ci->i_layout.pool_ns, *pns);
3040
3041 if (ci->i_layout.pool_id != old_pool || *pns != old_ns)
3042 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
3043
3044 *pns = old_ns;
3045
3046 /* size/truncate_seq? */
3047 queue_trunc = ceph_fill_file_size(inode, issued,
3048 le32_to_cpu(grant->truncate_seq),
3049 le64_to_cpu(grant->truncate_size),
3050 size);
3051 }
3052
3053 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
3054 if (max_size != ci->i_max_size) {
3055 dout("max_size %lld -> %llu\n",
3056 ci->i_max_size, max_size);
3057 ci->i_max_size = max_size;
3058 if (max_size >= ci->i_wanted_max_size) {
3059 ci->i_wanted_max_size = 0; /* reset */
3060 ci->i_requested_max_size = 0;
3061 }
3062 wake = true;
3063 } else if (ci->i_wanted_max_size > ci->i_max_size &&
3064 ci->i_wanted_max_size > ci->i_requested_max_size) {
3065 /* CEPH_CAP_OP_IMPORT */
3066 wake = true;
3067 }
3068 }
3069
3070 /* check cap bits */
3071 wanted = __ceph_caps_wanted(ci);
3072 used = __ceph_caps_used(ci);
3073 dirty = __ceph_caps_dirty(ci);
3074 dout(" my wanted = %s, used = %s, dirty %s\n",
3075 ceph_cap_string(wanted),
3076 ceph_cap_string(used),
3077 ceph_cap_string(dirty));
3078 if (wanted != le32_to_cpu(grant->wanted)) {
3079 dout("mds wanted %s -> %s\n",
3080 ceph_cap_string(le32_to_cpu(grant->wanted)),
3081 ceph_cap_string(wanted));
3082 /* imported cap may not have correct mds_wanted */
3083 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
3084 check_caps = 1;
3085 }
3086
3087 /* revocation, grant, or no-op? */
3088 if (cap->issued & ~newcaps) {
3089 int revoking = cap->issued & ~newcaps;
3090
3091 dout("revocation: %s -> %s (revoking %s)\n",
3092 ceph_cap_string(cap->issued),
3093 ceph_cap_string(newcaps),
3094 ceph_cap_string(revoking));
3095 if (revoking & used & CEPH_CAP_FILE_BUFFER)
3096 writeback = true; /* initiate writeback; will delay ack */
3097 else if (revoking == CEPH_CAP_FILE_CACHE &&
3098 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
3099 queue_invalidate)
3100 ; /* do nothing yet, invalidation will be queued */
3101 else if (cap == ci->i_auth_cap)
3102 check_caps = 1; /* check auth cap only */
3103 else
3104 check_caps = 2; /* check all caps */
3105 cap->issued = newcaps;
3106 cap->implemented |= newcaps;
3107 } else if (cap->issued == newcaps) {
3108 dout("caps unchanged: %s -> %s\n",
3109 ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
3110 } else {
3111 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
3112 ceph_cap_string(newcaps));
3113 /* non-auth MDS is revoking the newly granted caps? */
3114 if (cap == ci->i_auth_cap &&
3115 __ceph_caps_revoking_other(ci, cap, newcaps))
3116 check_caps = 2;
3117
3118 cap->issued = newcaps;
3119 cap->implemented |= newcaps; /* add bits only, to
3120 * avoid stepping on a
3121 * pending revocation */
3122 wake = true;
3123 }
3124 BUG_ON(cap->issued & ~cap->implemented);
3125
3126 if (inline_version > 0 && inline_version >= ci->i_inline_version) {
3127 ci->i_inline_version = inline_version;
3128 if (ci->i_inline_version != CEPH_INLINE_NONE &&
3129 (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
3130 fill_inline = true;
3131 }
3132
3133 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
3134 if (newcaps & ~issued)
3135 wake = true;
3136 kick_flushing_inode_caps(mdsc, session, inode);
3137 up_read(&mdsc->snap_rwsem);
3138 } else {
3139 spin_unlock(&ci->i_ceph_lock);
3140 }
3141
3142 if (fill_inline)
3143 ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
3144
3145 if (queue_trunc)
3146 ceph_queue_vmtruncate(inode);
3147
3148 if (writeback)
3149 /*
3150 * queue inode for writeback: we can't actually call
3151 * filemap_write_and_wait, etc. from message handler
3152 * context.
3153 */
3154 ceph_queue_writeback(inode);
3155 if (queue_invalidate)
3156 ceph_queue_invalidate(inode);
3157 if (deleted_inode)
3158 invalidate_aliases(inode);
3159 if (wake)
3160 wake_up_all(&ci->i_cap_wq);
3161
3162 if (check_caps == 1)
3163 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
3164 session);
3165 else if (check_caps == 2)
3166 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
3167 else
3168 mutex_unlock(&session->s_mutex);
3169 }
3170
3171 /*
3172 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
3173 * MDS has been safely committed.
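*
* An ack for flush_tid also covers earlier flushes on this inode
* (smaller tids); cap bits that a later, still-pending flush has
* re-dirtied are masked out of the cleaned set.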
3174 */
3175 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
3176 struct ceph_mds_caps *m,
3177 struct ceph_mds_session *session,
3178 struct ceph_cap *cap)
3179 __releases(ci->i_ceph_lock)
3180 {
3181 struct ceph_inode_info *ci = ceph_inode(inode);
3182 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3183 struct ceph_cap_flush *cf, *tmp_cf;
3184 LIST_HEAD(to_remove);
3185 unsigned seq = le32_to_cpu(m->seq);
3186 int dirty = le32_to_cpu(m->dirty);
3187 int cleaned = 0;
3188 bool drop = false;
3189 bool wake_ci = false;
3190 bool wake_mdsc = false;
3191
3192 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
3193 if (cf->tid == flush_tid)
3194 cleaned = cf->caps;
3195 if (cf->caps == 0) /* capsnap */
3196 continue;
3197 if (cf->tid <= flush_tid) {
3198 if (__finish_cap_flush(NULL, ci, cf))
3199 wake_ci = true;
3200 list_add_tail(&cf->i_list, &to_remove);
3201 } else {
3202 cleaned &= ~cf->caps;
3203 if (!cleaned)
3204 break;
3205 }
3206 }
3207
3208 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
3209 " flushing %s -> %s\n",
3210 inode, session->s_mds, seq, ceph_cap_string(dirty),
3211 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
3212 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3213
3214 if (list_empty(&to_remove) && !cleaned)
3215 goto out;
3216
3217 ci->i_flushing_caps &= ~cleaned;
3218
3219 spin_lock(&mdsc->cap_dirty_lock);
3220
3221 list_for_each_entry(cf, &to_remove, i_list) {
3222 if (__finish_cap_flush(mdsc, NULL, cf))
3223 wake_mdsc = true;
3224 }
3225
3226 if (ci->i_flushing_caps == 0) {
3227 if (list_empty(&ci->i_cap_flush_list)) {
3228 list_del_init(&ci->i_flushing_item);
3229 if (!list_empty(&session->s_cap_flushing)) {
3230 dout(" mds%d still flushing cap on %p\n",
3231 session->s_mds,
3232 &list_first_entry(&session->s_cap_flushing,
3233 struct ceph_inode_info,
3234 i_flushing_item)->vfs_inode);
3235 }
3236 }
3237 mdsc->num_cap_flushing--;
3238 dout(" inode %p now !flushing\n", inode);
3239
3240 if (ci->i_dirty_caps == 0) {
3241 dout(" inode %p now clean\n", inode);
3242 BUG_ON(!list_empty(&ci->i_dirty_item));
3243 drop = true;
3244 if (ci->i_wr_ref == 0 &&
3245 ci->i_wrbuffer_ref_head == 0) {
3246 BUG_ON(!ci->i_head_snapc);
3247 ceph_put_snap_context(ci->i_head_snapc);
3248 ci->i_head_snapc = NULL;
3249 }
3250 } else {
3251 BUG_ON(list_empty(&ci->i_dirty_item));
3252 }
3253 }
3254 spin_unlock(&mdsc->cap_dirty_lock);
3255
3256 out:
3257 spin_unlock(&ci->i_ceph_lock);
3258
3259 while (!list_empty(&to_remove)) {
3260 cf = list_first_entry(&to_remove,
3261 struct ceph_cap_flush, i_list);
3262 list_del(&cf->i_list);
3263 ceph_free_cap_flush(cf);
3264 }
3265
3266 if (wake_ci)
3267 wake_up_all(&ci->i_cap_wq);
3268 if (wake_mdsc)
3269 wake_up_all(&mdsc->cap_flushing_wq);
3270 if (drop)
3271 iput(inode);
3272 }
3273
3274 /*
3275 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
3276 * throw away our cap_snap.
3277 *
3278 * Caller holds s_mutex.
3279 */
3280 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
3281 struct ceph_mds_caps *m,
3282 struct ceph_mds_session *session)
3283 {
3284 struct ceph_inode_info *ci = ceph_inode(inode);
3285 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3286 u64 follows = le64_to_cpu(m->snap_follows);
3287 struct ceph_cap_snap *capsnap;
3288 bool flushed = false;
3289 bool wake_ci = false;
3290 bool wake_mdsc = false;
3291
3292 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
3293 inode, ci, session->s_mds, follows);
3294
3295 spin_lock(&ci->i_ceph_lock);
3296 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
3297 if (capsnap->follows == follows) {
3298 if (capsnap->cap_flush.tid != flush_tid) {
3299 dout(" cap_snap %p follows %lld tid %lld !="
3300 " %lld\n", capsnap, follows,
3301 flush_tid, capsnap->cap_flush.tid);
3302 break;
3303 }
3304 flushed = true;
3305 break;
3306 } else {
3307 dout(" skipping cap_snap %p follows %lld\n",
3308 capsnap, capsnap->follows);
3309 }
3310 }
3311 if (flushed) {
3312 WARN_ON(capsnap->dirty_pages || capsnap->writing);
3313 dout(" removing %p cap_snap %p follows %lld\n",
3314 inode, capsnap, follows);
3315 list_del(&capsnap->ci_item);
3316 if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
3317 wake_ci = true;
3318
3319 spin_lock(&mdsc->cap_dirty_lock);
3320
3321 if (list_empty(&ci->i_cap_flush_list))
3322 list_del_init(&ci->i_flushing_item);
3323
3324 if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
3325 wake_mdsc = true;
3326
3327 spin_unlock(&mdsc->cap_dirty_lock);
3328 }
3329 spin_unlock(&ci->i_ceph_lock);
3330 if (flushed) {
3331 ceph_put_snap_context(capsnap->context);
3332 ceph_put_cap_snap(capsnap);
3333 if (wake_ci)
3334 wake_up_all(&ci->i_cap_wq);
3335 if (wake_mdsc)
3336 wake_up_all(&mdsc->cap_flushing_wq);
3337 iput(inode);
3338 }
3339 }
3340
3341 /*
3342 * Handle TRUNC from MDS, indicating file truncation.
3343 *
3344 * caller holds s_mutex.
3345 */
3346 static void handle_cap_trunc(struct inode *inode,
3347 struct ceph_mds_caps *trunc,
3348 struct ceph_mds_session *session)
3349 __releases(ci->i_ceph_lock)
3350 {
3351 struct ceph_inode_info *ci = ceph_inode(inode);
3352 int mds = session->s_mds;
3353 int seq = le32_to_cpu(trunc->seq);
3354 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
3355 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
3356 u64 size = le64_to_cpu(trunc->size);
3357 int implemented = 0;
3358 int dirty = __ceph_caps_dirty(ci);
3359 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
3360 int queue_trunc = 0;
3361
3362 issued |= implemented | dirty;
3363
3364 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
3365 inode, mds, seq, truncate_size, truncate_seq);
3366 queue_trunc = ceph_fill_file_size(inode, issued,
3367 truncate_seq, truncate_size, size);
3368 spin_unlock(&ci->i_ceph_lock);
3369
3370 if (queue_trunc)
3371 ceph_queue_vmtruncate(inode);
3372 }
3373
3374 /*
3375 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
3376 * different one. If we are the most recent migration we've seen (as
3377 * indicated by mseq), make note of the migrating cap bits for the
3378 * duration (until we see the corresponding IMPORT).
3379 *
3380 * caller holds s_mutex
3381 */
3382 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
3383 struct ceph_mds_cap_peer *ph,
3384 struct ceph_mds_session *session)
3385 {
3386 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
3387 struct ceph_mds_session *tsession = NULL;
3388 struct ceph_cap *cap, *tcap, *new_cap = NULL;
3389 struct ceph_inode_info *ci = ceph_inode(inode);
3390 u64 t_cap_id;
3391 unsigned mseq = le32_to_cpu(ex->migrate_seq);
3392 unsigned t_seq, t_mseq;
3393 int target, issued;
3394 int mds = session->s_mds;
3395
3396 if (ph) {
3397 t_cap_id = le64_to_cpu(ph->cap_id);
3398 t_seq = le32_to_cpu(ph->seq);
3399 t_mseq = le32_to_cpu(ph->mseq);
3400 target = le32_to_cpu(ph->mds);
3401 } else {
3402 t_cap_id = t_seq = t_mseq = 0;
3403 target = -1;
3404 }
3405
3406 dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
3407 inode, ci, mds, mseq, target);
3408 retry:
3409 spin_lock(&ci->i_ceph_lock);
3410 cap = __get_cap_for_mds(ci, mds);
3411 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
3412 goto out_unlock;
3413
3414 if (target < 0) {
3415 __ceph_remove_cap(cap, false);
3416 if (!ci->i_auth_cap)
3417 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
3418 goto out_unlock;
3419 }
3420
3421 /*
3422 * now we know we haven't received the cap import message yet
3423 * because the exported cap still exists.
3424 */
3425
3426 issued = cap->issued;
3427 WARN_ON(issued != cap->implemented);
3428
3429 tcap = __get_cap_for_mds(ci, target);
3430 if (tcap) {
3431 /* already have caps from the target */
3432 if (tcap->cap_id == t_cap_id &&
3433 ceph_seq_cmp(tcap->seq, t_seq) < 0) {
3434 dout(" updating import cap %p mds%d\n", tcap, target);
3435 tcap->cap_id = t_cap_id;
3436 tcap->seq = t_seq - 1;
3437 tcap->issue_seq = t_seq - 1;
3438 tcap->mseq = t_mseq;
3439 tcap->issued |= issued;
3440 tcap->implemented |= issued;
3441 if (cap == ci->i_auth_cap)
3442 ci->i_auth_cap = tcap;
3443
3444 if (!list_empty(&ci->i_cap_flush_list) &&
3445 ci->i_auth_cap == tcap) {
3446 spin_lock(&mdsc->cap_dirty_lock);
3447 list_move_tail(&ci->i_flushing_item,
3448 &tcap->session->s_cap_flushing);
3449 spin_unlock(&mdsc->cap_dirty_lock);
3450 }
3451 }
3452 __ceph_remove_cap(cap, false);
3453 goto out_unlock;
3454 } else if (tsession) {
3455 /* add placeholder for the export target */
3456 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
3457 tcap = new_cap;
3458 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
3459 t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
3460
3461 if (!list_empty(&ci->i_cap_flush_list) &&
3462 ci->i_auth_cap == tcap) {
3463 spin_lock(&mdsc->cap_dirty_lock);
3464 list_move_tail(&ci->i_flushing_item,
3465 &tcap->session->s_cap_flushing);
3466 spin_unlock(&mdsc->cap_dirty_lock);
3467 }
3468
3469 __ceph_remove_cap(cap, false);
3470 goto out_unlock;
3471 }
3472
3473 spin_unlock(&ci->i_ceph_lock);
3474 mutex_unlock(&session->s_mutex);
3475
3476 /* open target session */
3477 tsession = ceph_mdsc_open_export_target_session(mdsc, target);
3478 if (!IS_ERR(tsession)) {
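/* take the two session mutexes in a fixed (descending
 * mds rank) order to avoid an ABBA deadlock with a
 * concurrent export going in the other direction */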
3479 if (mds > target) {
3480 mutex_lock(&session->s_mutex);
3481 mutex_lock_nested(&tsession->s_mutex,
3482 SINGLE_DEPTH_NESTING);
3483 } else {
3484 mutex_lock(&tsession->s_mutex);
3485 mutex_lock_nested(&session->s_mutex,
3486 SINGLE_DEPTH_NESTING);
3487 }
3488 new_cap = ceph_get_cap(mdsc, NULL);
3489 } else {
3490 WARN_ON(1);
3491 tsession = NULL;
3492 target = -1;
3493 }
3494 goto retry;
3495
3496 out_unlock:
3497 spin_unlock(&ci->i_ceph_lock);
3498 mutex_unlock(&session->s_mutex);
3499 if (tsession) {
3500 mutex_unlock(&tsession->s_mutex);
3501 ceph_put_mds_session(tsession);
3502 }
3503 if (new_cap)
3504 ceph_put_cap(mdsc, new_cap);
3505 }
3506
3507 /*
3508 * Handle cap IMPORT.
3509 *
3510 * caller holds s_mutex. acquires i_ceph_lock
3511 */
3512 static void handle_cap_import(struct ceph_mds_client *mdsc,
3513 struct inode *inode, struct ceph_mds_caps *im,
3514 struct ceph_mds_cap_peer *ph,
3515 struct ceph_mds_session *session,
3516 struct ceph_cap **target_cap, int *old_issued)
3517 __acquires(ci->i_ceph_lock)
3518 {
3519 struct ceph_inode_info *ci = ceph_inode(inode);
3520 struct ceph_cap *cap, *ocap, *new_cap = NULL;
3521 int mds = session->s_mds;
3522 int issued;
3523 unsigned caps = le32_to_cpu(im->caps);
3524 unsigned wanted = le32_to_cpu(im->wanted);
3525 unsigned seq = le32_to_cpu(im->seq);
3526 unsigned mseq = le32_to_cpu(im->migrate_seq);
3527 u64 realmino = le64_to_cpu(im->realm);
3528 u64 cap_id = le64_to_cpu(im->cap_id);
3529 u64 p_cap_id;
3530 int peer;
3531
3532 if (ph) {
3533 p_cap_id = le64_to_cpu(ph->cap_id);
3534 peer = le32_to_cpu(ph->mds);
3535 } else {
3536 p_cap_id = 0;
3537 peer = -1;
3538 }
3539
3540 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
3541 inode, ci, mds, mseq, peer);
3542
3543 retry:
3544 spin_lock(&ci->i_ceph_lock);
3545 cap = __get_cap_for_mds(ci, mds);
3546 if (!cap) {
3547 if (!new_cap) {
3548 spin_unlock(&ci->i_ceph_lock);
3549 new_cap = ceph_get_cap(mdsc, NULL);
3550 goto retry;
3551 }
3552 cap = new_cap;
3553 } else {
3554 if (new_cap) {
3555 ceph_put_cap(mdsc, new_cap);
3556 new_cap = NULL;
3557 }
3558 }
3559
3560 __ceph_caps_issued(ci, &issued);
3561 issued |= __ceph_caps_dirty(ci);
3562
3563 ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
3564 realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
3565
3566 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
3567 if (ocap && ocap->cap_id == p_cap_id) {
3568 dout(" remove export cap %p mds%d flags %d\n",
3569 ocap, peer, ph->flags);
3570 if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
3571 (ocap->seq != le32_to_cpu(ph->seq) ||
3572 ocap->mseq != le32_to_cpu(ph->mseq))) {
3573 pr_err("handle_cap_import: mismatched seq/mseq: "
3574 "ino (%llx.%llx) mds%d seq %d mseq %d "
3575 "importer mds%d has peer seq %d mseq %d\n",
3576 ceph_vinop(inode), peer, ocap->seq,
3577 ocap->mseq, mds, le32_to_cpu(ph->seq),
3578 le32_to_cpu(ph->mseq));
3579 }
3580 __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
3581 }
3582
3583 /* make sure we re-request max_size, if necessary */
3584 ci->i_requested_max_size = 0;
3585
3586 *old_issued = issued;
3587 *target_cap = cap;
3588 }
3589
3590 /*
3591 * Handle a caps message from the MDS.
3592 *
3593 * Identify the appropriate session, inode, and call the right handler
3594 * based on the cap op.
3595 */
3596 void ceph_handle_caps(struct ceph_mds_session *session,
3597 struct ceph_msg *msg)
3598 {
3599 struct ceph_mds_client *mdsc = session->s_mdsc;
3600 struct super_block *sb = mdsc->fsc->sb;
3601 struct inode *inode;
3602 struct ceph_inode_info *ci;
3603 struct ceph_cap *cap;
3604 struct ceph_mds_caps *h;
3605 struct ceph_mds_cap_peer *peer = NULL;
3606 struct ceph_snap_realm *realm = NULL;
3607 struct ceph_string *pool_ns = NULL;
3608 int mds = session->s_mds;
3609 int op, issued;
3610 u32 seq, mseq;
3611 struct ceph_vino vino;
3612 u64 tid;
3613 u64 inline_version = 0;
3614 void *inline_data = NULL;
3615 u32 inline_len = 0;
3616 void *snaptrace;
3617 size_t snaptrace_len;
3618 void *p, *end;
3619
3620 dout("handle_caps from mds%d\n", mds);
3621
3622 /* decode */
3623 end = msg->front.iov_base + msg->front.iov_len;
3624 tid = le64_to_cpu(msg->hdr.tid);
3625 if (msg->front.iov_len < sizeof(*h))
3626 goto bad;
3627 h = msg->front.iov_base;
3628 op = le32_to_cpu(h->op);
3629 vino.ino = le64_to_cpu(h->ino);
3630 vino.snap = CEPH_NOSNAP;
3631 seq = le32_to_cpu(h->seq);
3632 mseq = le32_to_cpu(h->migrate_seq);
3633
3634 snaptrace = h + 1;
3635 snaptrace_len = le32_to_cpu(h->snap_trace_len);
3636 p = snaptrace + snaptrace_len;
3637
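/*
 * The tail of the message is versioned; each hdr.version adds
 * fields: v2 flock blob, v3 cap peer (import/export), v4 inline
 * data, v5 osd epoch barrier, v6 flush_tid, v7 caller uid/gid,
 * v8 pool namespace.
 */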
3638 if (le16_to_cpu(msg->hdr.version) >= 2) {
3639 u32 flock_len;
3640 ceph_decode_32_safe(&p, end, flock_len, bad);
3641 if (p + flock_len > end)
3642 goto bad;
3643 p += flock_len;
3644 }
3645
3646 if (le16_to_cpu(msg->hdr.version) >= 3) {
3647 if (op == CEPH_CAP_OP_IMPORT) {
3648 if (p + sizeof(*peer) > end)
3649 goto bad;
3650 peer = p;
3651 p += sizeof(*peer);
3652 } else if (op == CEPH_CAP_OP_EXPORT) {
3653 /* recorded in unused fields */
3654 peer = (void *)&h->size;
3655 }
3656 }
3657
3658 if (le16_to_cpu(msg->hdr.version) >= 4) {
3659 ceph_decode_64_safe(&p, end, inline_version, bad);
3660 ceph_decode_32_safe(&p, end, inline_len, bad);
3661 if (p + inline_len > end)
3662 goto bad;
3663 inline_data = p;
3664 p += inline_len;
3665 }
3666
3667 if (le16_to_cpu(msg->hdr.version) >= 5) {
3668 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
3669 u32 epoch_barrier;
3670
3671 ceph_decode_32_safe(&p, end, epoch_barrier, bad);
3672 ceph_osdc_update_epoch_barrier(osdc, epoch_barrier);
3673 }
3674
3675 if (le16_to_cpu(msg->hdr.version) >= 8) {
3676 u64 flush_tid;
3677 u32 caller_uid, caller_gid;
3678 u32 pool_ns_len;
3679
3680 /* version >= 6 */
3681 ceph_decode_64_safe(&p, end, flush_tid, bad);
3682 /* version >= 7 */
3683 ceph_decode_32_safe(&p, end, caller_uid, bad);
3684 ceph_decode_32_safe(&p, end, caller_gid, bad);
3685 /* version >= 8 */
3686 ceph_decode_32_safe(&p, end, pool_ns_len, bad);
3687 if (pool_ns_len > 0) {
3688 ceph_decode_need(&p, end, pool_ns_len, bad);
3689 pool_ns = ceph_find_or_create_string(p, pool_ns_len);
3690 p += pool_ns_len;
3691 }
3692 }
3693
3694 /* lookup ino */
3695 inode = ceph_find_inode(sb, vino);
3696 ci = ceph_inode(inode);
3697 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
3698 vino.snap, inode);
3699
3700 mutex_lock(&session->s_mutex);
3701 session->s_seq++;
3702 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
3703 (unsigned)seq);
3704
3705 if (!inode) {
3706 dout(" i don't have ino %llx\n", vino.ino);
3707
3708 if (op == CEPH_CAP_OP_IMPORT) {
3709 cap = ceph_get_cap(mdsc, NULL);
3710 cap->cap_ino = vino.ino;
3711 cap->queue_release = 1;
3712 cap->cap_id = le64_to_cpu(h->cap_id);
3713 cap->mseq = mseq;
3714 cap->seq = seq;
3715 cap->issue_seq = seq;
3716 spin_lock(&session->s_cap_lock);
3717 list_add_tail(&cap->session_caps,
3718 &session->s_cap_releases);
3719 session->s_num_cap_releases++;
3720 spin_unlock(&session->s_cap_lock);
3721 }
3722 goto flush_cap_releases;
3723 }
3724
3725 /* these will work even if we don't have a cap yet */
3726 switch (op) {
3727 case CEPH_CAP_OP_FLUSHSNAP_ACK:
3728 handle_cap_flushsnap_ack(inode, tid, h, session);
3729 goto done;
3730
3731 case CEPH_CAP_OP_EXPORT:
3732 handle_cap_export(inode, h, peer, session);
3733 goto done_unlocked;
3734
3735 case CEPH_CAP_OP_IMPORT:
3736 realm = NULL;
3737 if (snaptrace_len) {
3738 down_write(&mdsc->snap_rwsem);
3739 ceph_update_snap_trace(mdsc, snaptrace,
3740 snaptrace + snaptrace_len,
3741 false, &realm);
3742 downgrade_write(&mdsc->snap_rwsem);
3743 } else {
3744 down_read(&mdsc->snap_rwsem);
3745 }
3746 handle_cap_import(mdsc, inode, h, peer, session,
3747 &cap, &issued);
3748 handle_cap_grant(mdsc, inode, h, &pool_ns,
3749 inline_version, inline_data, inline_len,
3750 msg->middle, session, cap, issued);
3751 if (realm)
3752 ceph_put_snap_realm(mdsc, realm);
3753 goto done_unlocked;
3754 }
3755
3756 /* the rest require a cap */
3757 spin_lock(&ci->i_ceph_lock);
3758 cap = __get_cap_for_mds(ceph_inode(inode), mds);
3759 if (!cap) {
3760 dout(" no cap on %p ino %llx.%llx from mds%d\n",
3761 inode, ceph_ino(inode), ceph_snap(inode), mds);
3762 spin_unlock(&ci->i_ceph_lock);
3763 goto flush_cap_releases;
3764 }
3765
3766 /* note that each of these drops i_ceph_lock for us */
3767 switch (op) {
3768 case CEPH_CAP_OP_REVOKE:
3769 case CEPH_CAP_OP_GRANT:
3770 __ceph_caps_issued(ci, &issued);
3771 issued |= __ceph_caps_dirty(ci);
3772 handle_cap_grant(mdsc, inode, h, &pool_ns,
3773 inline_version, inline_data, inline_len,
3774 msg->middle, session, cap, issued);
3775 goto done_unlocked;
3776
3777 case CEPH_CAP_OP_FLUSH_ACK:
3778 handle_cap_flush_ack(inode, tid, h, session, cap);
3779 break;
3780
3781 case CEPH_CAP_OP_TRUNC:
3782 handle_cap_trunc(inode, h, session);
3783 break;
3784
3785 default:
3786 spin_unlock(&ci->i_ceph_lock);
3787 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
3788 ceph_cap_op_name(op));
3789 }
3790
3791 goto done;
3792
3793 flush_cap_releases:
3794 /*
3795  * send any queued cap release messages to try to move things
3796 * along for the mds (who clearly thinks we still have this
3797 * cap).
3798 */
3799 ceph_send_cap_releases(mdsc, session);
3800
3801 done:
3802 mutex_unlock(&session->s_mutex);
3803 done_unlocked:
3804 iput(inode);
3805 ceph_put_string(pool_ns);
3806 return;
3807
3808 bad:
3809 pr_err("ceph_handle_caps: corrupt message\n");
3810 ceph_msg_dump(msg);
3811 return;
3812 }
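
/*
 * A minimal sketch (not part of the driver) of the bounds-checked
 * decode pattern ceph_handle_caps() relies on: each *_safe helper
 * checks that enough bytes remain before 'end' and jumps to the
 * supplied label on a truncated message, so one 'bad' label can serve
 * an entire decoder.  The function and field layout here are
 * hypothetical.
 */
static int __maybe_unused example_decode(void *p, void *end, u64 *tid)
{
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);		/* length prefix */
	ceph_decode_need(&p, end, len, bad);		/* payload must fit */
	p += len;					/* skip the payload */
	ceph_decode_64_safe(&p, end, *tid, bad);	/* trailing tid */
	return 0;

bad:
	return -EINVAL;
}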
3813
3814 /*
3815  * Delayed work handler to process expired entries on the delayed cap release LRU list.
3816  */
3817 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
3818 {
3819 struct inode *inode;
3820 struct ceph_inode_info *ci;
3821 int flags = CHECK_CAPS_NODELAY;
3822
3823 dout("check_delayed_caps\n");
3824 while (1) {
3825 spin_lock(&mdsc->cap_delay_lock);
3826 if (list_empty(&mdsc->cap_delay_list))
3827 break;
3828 ci = list_first_entry(&mdsc->cap_delay_list,
3829 struct ceph_inode_info,
3830 i_cap_delay_list);
3831 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
3832 time_before(jiffies, ci->i_hold_caps_max))
3833 break;
3834 list_del_init(&ci->i_cap_delay_list);
3835
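		/*
		 * Grab an inode reference while cap_delay_lock still
		 * protects ci; igrab() returns NULL if the inode is
		 * already being evicted, in which case we skip it.
		 */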
3836 inode = igrab(&ci->vfs_inode);
3837 spin_unlock(&mdsc->cap_delay_lock);
3838
3839 if (inode) {
3840 dout("check_delayed_caps on %p\n", inode);
3841 ceph_check_caps(ci, flags, NULL);
3842 iput(inode);
3843 }
3844 }
3845 spin_unlock(&mdsc->cap_delay_lock);
3846 }
3847
3848 /*
3849 * Flush all dirty caps to the mds
3850 */
3851 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
3852 {
3853 struct ceph_inode_info *ci;
3854 struct inode *inode;
3855
3856 dout("flush_dirty_caps\n");
3857 spin_lock(&mdsc->cap_dirty_lock);
3858 while (!list_empty(&mdsc->cap_dirty)) {
3859 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
3860 i_dirty_item);
3861 inode = &ci->vfs_inode;
3862 ihold(inode);
3863 dout("flush_dirty_caps %p\n", inode);
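		/*
		 * Drop cap_dirty_lock before ceph_check_caps(), which
		 * takes i_ceph_lock and may block sending cap messages;
		 * the ihold() above keeps the inode alive meanwhile.
		 */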
3864 spin_unlock(&mdsc->cap_dirty_lock);
3865 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
3866 iput(inode);
3867 spin_lock(&mdsc->cap_dirty_lock);
3868 }
3869 spin_unlock(&mdsc->cap_dirty_lock);
3870 dout("flush_dirty_caps done\n");
3871 }
3872
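/*
 * Track an open file reference by mode.  (fmode << 1) | 1 turns the
 * open mode into a bitmask over i_nr_by_mode[]: bit 0 (the pin) is
 * always set, and each mode bit m in fmode sets bit m + 1.  For
 * example, CEPH_FILE_MODE_RDWR (3) yields 0b0111 and bumps the pin,
 * read and write counters, while CEPH_FILE_MODE_LAZY (4) yields
 * 0b1001 and bumps the pin and lazy counters.  Callers are expected
 * to hold i_ceph_lock, per the "__" convention used in this file.
 */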
3873 void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode)
3874 {
3875 int i;
3876 int bits = (fmode << 1) | 1;
3877 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
3878 if (bits & (1 << i))
3879 ci->i_nr_by_mode[i]++;
3880 }
3881 }
3882
3883 /*
3884  * Drop an open file reference. If this was the last reference for
3885  * some mode, we may need to release capabilities to the MDS (or
3886  * schedule their delayed release).
3887 */
3888 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3889 {
3890 int i, last = 0;
3891 int bits = (fmode << 1) | 1;
3892 spin_lock(&ci->i_ceph_lock);
3893 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
3894 if (bits & (1 << i)) {
3895 BUG_ON(ci->i_nr_by_mode[i] == 0);
3896 if (--ci->i_nr_by_mode[i] == 0)
3897 last++;
3898 }
3899 }
3900 dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n",
3901 &ci->vfs_inode, fmode,
3902 ci->i_nr_by_mode[0], ci->i_nr_by_mode[1],
3903 ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]);
3904 spin_unlock(&ci->i_ceph_lock);
3905
3906 if (last && ci->i_vino.snap == CEPH_NOSNAP)
3907 ceph_check_caps(ci, 0, NULL);
3908 }
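
/*
 * A hedged usage sketch (hypothetical caller, not taken from this
 * file): an fmode reference is taken when a file is opened and
 * dropped when the last reference to that open file goes away, so
 * i_nr_by_mode[] always mirrors the set of currently open modes.
 */
static void __maybe_unused example_fmode_lifetime(struct ceph_inode_info *ci,
						  int fmode)
{
	spin_lock(&ci->i_ceph_lock);
	__ceph_get_fmode(ci, fmode);	/* e.g. at open time */
	spin_unlock(&ci->i_ceph_lock);

	/* ... file is in use ... */

	ceph_put_fmode(ci, fmode);	/* on the final fput */
}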
3909
3910 /*
3911 * Helpers for embedding cap and dentry lease releases into mds
3912 * requests.
3913 *
3914  * @force is used by ceph_encode_dentry_release() (below) to force
3915  * inclusion of a record for the directory inode, even when there
3916  * aren't any caps to drop.
3917 */
3918 int ceph_encode_inode_release(void **p, struct inode *inode,
3919 int mds, int drop, int unless, int force)
3920 {
3921 struct ceph_inode_info *ci = ceph_inode(inode);
3922 struct ceph_cap *cap;
3923 struct ceph_mds_request_release *rel = *p;
3924 int used, dirty;
3925 int ret = 0;
3926
3927 spin_lock(&ci->i_ceph_lock);
3928 used = __ceph_caps_used(ci);
3929 dirty = __ceph_caps_dirty(ci);
3930
3931 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3932 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3933 ceph_cap_string(unless));
3934
3935 /* only drop unused, clean caps */
3936 drop &= ~(used | dirty);
3937
3938 cap = __get_cap_for_mds(ci, mds);
3939 if (cap && __cap_is_valid(cap)) {
3940 if (force ||
3941 ((cap->issued & drop) &&
3942 (cap->issued & unless) == 0)) {
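			/*
			 * Only clear issued bits when we really are
			 * dropping caps; a force-only release (the else
			 * branch) emits a record but leaves our cap
			 * state untouched.
			 */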
3943 if ((cap->issued & drop) &&
3944 (cap->issued & unless) == 0) {
3945 int wanted = __ceph_caps_wanted(ci);
3946 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3947 wanted |= cap->mds_wanted;
3948 dout("encode_inode_release %p cap %p "
3949 "%s -> %s, wanted %s -> %s\n", inode, cap,
3950 ceph_cap_string(cap->issued),
3951 ceph_cap_string(cap->issued & ~drop),
3952 ceph_cap_string(cap->mds_wanted),
3953 ceph_cap_string(wanted));
3954
3955 cap->issued &= ~drop;
3956 cap->implemented &= ~drop;
3957 cap->mds_wanted = wanted;
3958 } else {
3959 dout("encode_inode_release %p cap %p %s"
3960 " (force)\n", inode, cap,
3961 ceph_cap_string(cap->issued));
3962 }
3963
3964 rel->ino = cpu_to_le64(ceph_ino(inode));
3965 rel->cap_id = cpu_to_le64(cap->cap_id);
3966 rel->seq = cpu_to_le32(cap->seq);
3967 rel->issue_seq = cpu_to_le32(cap->issue_seq);
3968 rel->mseq = cpu_to_le32(cap->mseq);
3969 rel->caps = cpu_to_le32(cap->implemented);
3970 rel->wanted = cpu_to_le32(cap->mds_wanted);
3971 rel->dname_len = 0;
3972 rel->dname_seq = 0;
3973 *p += sizeof(*rel);
3974 ret = 1;
3975 } else {
3976 dout("encode_inode_release %p cap %p %s\n",
3977 inode, cap, ceph_cap_string(cap->issued));
3978 }
3979 }
3980 spin_unlock(&ci->i_ceph_lock);
3981 return ret;
3982 }
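
/*
 * A hedged sketch of how a caller might embed an inode release record
 * while building an MDS request payload; the buffer handling and cap
 * choice here are hypothetical.  ceph_encode_inode_release() writes a
 * struct ceph_mds_request_release at *p and advances *p past it only
 * when it returns 1, so room for the record must be reserved up front.
 */
static int __maybe_unused example_embed_release(struct inode *inode,
						void *buf, size_t buflen,
						int mds)
{
	void *p = buf;
	void *end = buf + buflen;

	if (p + sizeof(struct ceph_mds_request_release) > end)
		return -ENOSPC;	/* caller reserved too little room */

	/* drop the clean, unused FILE_CACHE cap unless FILE_SHARED is issued */
	return ceph_encode_inode_release(&p, inode, mds,
					 CEPH_CAP_FILE_CACHE,
					 CEPH_CAP_FILE_SHARED, 0);
}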
3983
3984 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3985 struct inode *dir,
3986 int mds, int drop, int unless)
3987 {
3988 struct dentry *parent = NULL;
3989 struct ceph_mds_request_release *rel = *p;
3990 struct ceph_dentry_info *di = ceph_dentry(dentry);
3991 int force = 0;
3992 int ret;
3993
3994 /*
3995  * force a record for the directory caps if we have a dentry lease.
3996 * this is racy (can't take i_ceph_lock and d_lock together), but it
3997 * doesn't have to be perfect; the mds will revoke anything we don't
3998 * release.
3999 */
4000 spin_lock(&dentry->d_lock);
4001 if (di->lease_session && di->lease_session->s_mds == mds)
4002 force = 1;
4003 if (!dir) {
4004 parent = dget(dentry->d_parent);
4005 dir = d_inode(parent);
4006 }
4007 spin_unlock(&dentry->d_lock);
4008
4009 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
4010 dput(parent);
4011
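	/*
	 * Recheck the lease under d_lock: it may have been dropped or
	 * moved to another session while we were encoding the inode
	 * release above.
	 */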
4012 spin_lock(&dentry->d_lock);
4013 if (ret && di->lease_session && di->lease_session->s_mds == mds) {
4014 dout("encode_dentry_release %p mds%d seq %d\n",
4015 dentry, mds, (int)di->lease_seq);
4016 rel->dname_len = cpu_to_le32(dentry->d_name.len);
4017 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
4018 *p += dentry->d_name.len;
4019 rel->dname_seq = cpu_to_le32(di->lease_seq);
4020 __ceph_mdsc_drop_dentry_lease(dentry);
4021 }
4022 spin_unlock(&dentry->d_lock);
4023 return ret;
4024 }