1#include "ceph_debug.h"
2
3#include <linux/wait.h>
4#include <linux/sched.h>
5
6#include "mds_client.h"
7#include "mon_client.h"
8#include "super.h"
9#include "messenger.h"
10#include "decode.h"
11
12/*
13 * A cluster of MDS (metadata server) daemons is responsible for
14 * managing the file system namespace (the directory hierarchy and
15 * inodes) and for coordinating shared access to storage. Metadata is
16 * partitioning hierarchically across a number of servers, and that
17 * partition varies over time as the cluster adjusts the distribution
18 * in order to balance load.
19 *
20 * The MDS client is primarily responsible to managing synchronous
21 * metadata requests for operations like open, unlink, and so forth.
22 * If there is a MDS failure, we find out about it when we (possibly
23 * request and) receive a new MDS map, and can resubmit affected
24 * requests.
25 *
26 * For the most part, though, we take advantage of a lossless
27 * communications channel to the MDS, and do not need to worry about
28 * timing out or resubmitting requests.
29 *
30 * We maintain a stateful "session" with each MDS we interact with.
31 * Within each session, we sent periodic heartbeat messages to ensure
32 * any capabilities or leases we have been issues remain valid. If
33 * the session times out and goes stale, our leases and capabilities
34 * are no longer valid.
35 */

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;
	return 0;
bad:
	return err;
}
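
/*
 * For reference, the byte layout walked above is roughly:
 *
 *	struct ceph_mds_reply_inode (with nsplits fragtree entries inline)
 *	u32 symlink_len, then symlink_len bytes of symlink target
 *	u32 xattr_len,   then xattr_len bytes of xattr blob
 *
 * We only record pointers into the message; nothing is copied.
 */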

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	/* alloc large array */
	info->dir_nr = num;
	info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
			       sizeof(*info->dir_dname) +
			       sizeof(*info->dir_dname_len) +
			       sizeof(*info->dir_dlease),
			       GFP_NOFS);
	if (info->dir_in == NULL) {
		err = -ENOMEM;
		goto out_bad;
	}
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);

	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i]);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
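
/*
 * Note on the allocation above: the four per-entry arrays (dir_in,
 * dir_dname, dir_dname_len, dir_dlease) are carved out of a single
 * kcalloc, so destroy_reply_info() can free all of them with one
 * kfree(info->dir_in).
 */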

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_trace(&p, p+len, info);
		if (err < 0)
			goto out_bad;
	}

	/* dir content */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		err = parse_reply_info_dir(&p, p+len, info);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
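
/*
 * So, roughly, the front of a reply message looks like:
 *
 *	struct ceph_mds_reply_head
 *	u32 trace_len, then trace_len bytes of trace (dentry + inode)
 *	u32 dir_len,   then dir_len bytes of readdir entries
 *	u32 snap_len,  then snap_len bytes of opaque snap trace blob
 *
 * Any of the three sections may be empty (length 0).
 */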

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	kfree(info->dir_in);
}


/*
 * sessions
 */
static const char *session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL\n", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		ceph_con_shutdown(&s->s_con);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(mdsc->client->msgr, &s->s_con);
	s->s_con.private = s;
	s->s_con.ops = &mds_con_ops;
	s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
	s->s_con.peer_name.num = cpu_to_le64(mds);
	ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	spin_lock_init(&s->s_cap_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = 0;
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_releases_done);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			return ERR_PTR(-ENOMEM);
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
	return s;
}
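
/*
 * The sessions array grows in powers of two: registering mds5, say,
 * reallocates it to 1 << get_count_order(6) = 8 slots, so the number
 * of reallocations grows only logarithmically with the highest mds
 * rank we ever talk to.
 */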

/*
 * called under mdsc->mutex
 */
static void unregister_session(struct ceph_mds_client *mdsc, int mds)
{
	dout("unregister_session mds%d %p\n", mds, mdsc->sessions[mds]);
	ceph_put_mds_session(mdsc->sessions[mds]);
	mdsc->sessions[mds] = NULL;
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_put_request(struct ceph_mds_request *req)
{
	dout("mdsc put_request %p %d -> %d\n", req,
	     atomic_read(&req->r_ref), atomic_read(&req->r_ref)-1);
	if (atomic_dec_and_test(&req->r_ref)) {
		if (req->r_request)
			ceph_msg_put(req->r_request);
		if (req->r_reply) {
			ceph_msg_put(req->r_reply);
			destroy_reply_info(&req->r_reply_info);
		}
		if (req->r_inode) {
			ceph_put_cap_refs(ceph_inode(req->r_inode),
					  CEPH_CAP_PIN);
			iput(req->r_inode);
		}
		if (req->r_locked_dir)
			ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
					  CEPH_CAP_PIN);
		if (req->r_target_inode)
			iput(req->r_target_inode);
		if (req->r_dentry)
			dput(req->r_dentry);
		if (req->r_old_dentry) {
			ceph_put_cap_refs(
				ceph_inode(req->r_old_dentry->d_parent->d_inode),
				CEPH_CAP_PIN);
			dput(req->r_old_dentry);
		}
		kfree(req->r_path1);
		kfree(req->r_path2);
		put_request_session(req);
		ceph_unreserve_caps(&req->r_caps_reservation);
		kfree(req);
	}
}

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;

	req = radix_tree_lookup(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);
	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		spin_lock(&ci->i_unsafe_lock);
		req->r_unsafe_dir = dir;
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	radix_tree_delete(&mdsc->request_tree, req->r_tid);
	ceph_mdsc_put_request(req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		if (req->r_dentry->d_inode) {
			inode = req->r_dentry->d_inode;
		} else {
			inode = req->r_dentry->d_parent->d_inode;
			hash = req->r_dentry->d_name.hash;
			is_hash = true;
		}
	}
	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, look for the authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				return mds;
			}
		}
	}

	spin_lock(&inode->i_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&inode->i_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&inode->i_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
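
/*
 * To summarize the policy above: an explicit resend/forward hint wins;
 * then, for hashed dentries in a known directory, the frag tree (a
 * random replica when any mds will do, else the frag's auth mds); then
 * whichever mds issued our auth (or any) cap on the inode; and finally
 * a random active mds from the map.
 */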


/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
	if (IS_ERR(msg)) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return ERR_PTR(PTR_ERR(msg));
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);
	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;
	int err = 0;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
	if (IS_ERR(msg)) {
		err = PTR_ERR(msg);
		goto out;
	}
	ceph_con_send(&session->s_con, msg);

out:
	return err;
}

/*
 * session caps
 */

/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * Helper to safely iterate over all caps associated with a session.
 *
 * caller must hold session s_mutex
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct ceph_cap *cap, *ncap;
	struct inode *inode;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode)
			continue;
		spin_unlock(&session->s_cap_lock);
		ret = cb(inode, cap, arg);
		iput(inode);
		if (ret < 0)
			return ret;
		spin_lock(&session->s_cap_lock);
	}
	spin_unlock(&session->s_cap_lock);

	return 0;
}
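
/*
 * Note the pattern above: s_cap_lock is dropped around the callback, so
 * the callback may sleep and may take i_lock; the igrab() reference is
 * what keeps the inode (and thus the cap's ci) alive in the meantime.
 */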

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	ceph_remove_cap(cap);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);
	BUG_ON(session->s_nr_caps > 0);
	cleanup_cap_releases(session);
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	wake_up(&ceph_inode(inode)->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb, NULL);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	session->s_renew_requested = jiffies;
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (IS_ERR(msg))
		return PTR_ERR(msg);
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && (session->s_cap_ttl == 0 ||
				 time_after_eq(jiffies, session->s_cap_ttl));

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int err = 0;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (IS_ERR(msg))
		err = PTR_ERR(msg);
	else
		ceph_con_send(&session->s_con, msg);
	return err;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&inode->i_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used));
	if (ci->i_dirty_caps)
		goto out;	/* dirty caps */
	if ((used & ~oissued) & mine)
		goto out;	/* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, NULL);
	} else {
		/* try to drop referring dentries */
		spin_unlock(&inode->i_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&inode->i_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
	}
	return 0;
}

/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
static int add_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session,
			    int extra)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	int err = -ENOMEM;

	if (extra < 0)
		extra = mdsc->client->mount_args->cap_release_safety;

	spin_lock(&session->s_cap_lock);

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
	}

	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   0, 0, NULL);
		if (!msg)
			goto out_unlocked;
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
	}

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		if (head->num) {
			dout(" queueing non-full %p (%d)\n", msg,
			     le32_to_cpu(head->num));
			list_move_tail(&msg->list_head,
				       &session->s_cap_releases_done);
			session->s_num_cap_releases -=
				CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
		}
	}
	err = 0;
	spin_unlock(&session->s_cap_lock);
out_unlocked:
	return err;
}
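
/*
 * The invariant maintained above: s_num_cap_releases (free record slots
 * across queued messages, CEPH_CAPS_PER_RELEASE per page-sized message)
 * always covers every cap this session holds plus a safety margin, so
 * queueing an individual release never needs to allocate.
 */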

/*
 * Check progress of cap flushing; i.e., whether everything that was
 * flushing has been flushed through want_flush_seq.
 *
 * Returns true if we've flushed through want_flush_seq.
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
	int mds, ret = 1;

	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		if (!session)
			continue;
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
					   i_flushing_item);
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&inode->i_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
				ret = 0;
			}
			spin_unlock(&inode->i_lock);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (!ret)
			return ret;
		mutex_lock(&mdsc->mutex);
	}

	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
	return ret;
}

/*
 * called under s_mutex
 */
static void send_cap_releases(struct ceph_mds_client *mdsc,
			      struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	while (1) {
		spin_lock(&session->s_cap_lock);
		if (list_empty(&session->s_cap_releases_done))
			break;
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		spin_unlock(&session->s_cap_lock);
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	spin_unlock(&session->s_cap_lock);
}

/*
 * requests
 */

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	req->r_fmode = -1;
	atomic_set(&req->r_ref, 1);  /* caller's ref; __register_request
					takes another for the request tree */
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *first;

	if (radix_tree_gang_lookup(&mdsc->request_tree,
				   (void **)&first, 0, 1) <= 0)
		return 0;
	return first->r_tid;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = temp->d_inode;

		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
		if (temp == NULL) {
			pr_err("build_path_dentry corrupt dentry %p\n", dentry);
			return ERR_PTR(-EINVAL);
		}
	}
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode = temp->d_inode;

		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path_dentry path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0)
				break;
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
			dout("build_path_dentry path+%d: %p '%.*s'\n",
			     pos, temp, temp->d_name.len, path + pos);
		}
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
		if (temp == NULL) {
			pr_err("build_path_dentry corrupt dentry\n");
			kfree(path);
			return ERR_PTR(-EINVAL);
		}
	}
	if (pos != 0) {
		pr_err("build_path_dentry did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(temp->d_inode);
	*plen = len;
	dout("build_path_dentry on %p %d built %llx '%.*s'\n",
	     dentry, atomic_read(&dentry->d_count), *base, len, path);
	return path;
}
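
/*
 * Worked example: for foo/.snap/bar with stop_on_nosnap=0, the first
 * pass counts "/bar" (4) + a lone "/" for the .snap dir (1) + "/foo"
 * (4), then drops the leading slash; the second pass fills in the
 * 8-byte string "foo//bar", and *base is the ino of the root dentry
 * the walk stopped at.
 */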

static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(dentry->d_parent->d_inode);
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = strlen(rpath);
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
	if (IS_ERR(msg))
		goto out_free2;

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(current_fsuid());
	head->caller_gid = cpu_to_le32(current_fsgid());
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : req->r_dentry->d_inode,
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_old_dentry->d_inode,
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
	head->num_releases = cpu_to_le16(releases);

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	msg->pages = req->r_pages;
	msg->nr_pages = req->r_num_pages;
	msg->hdr.data_len = cpu_to_le32(req->r_data_len);
	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete(&req->r_completion);
}

/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_mds = mds;
	req->r_attempts++;
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds);
	if (IS_ERR(msg)) {
		req->r_reply = ERR_PTR(PTR_ERR(msg));
		complete_request(mdsc, req);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->tid = cpu_to_le64(req->r_tid);
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);

	if (req->r_target_inode && req->r_got_unsafe)
		rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
	else
		rhead->ino = 0;
	return 0;
}

/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = -EAGAIN;

	if (req->r_reply)
		goto out;

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session)
		session = register_session(mdsc, mds);
	if (IS_ERR(session)) {
		err = PTR_ERR(session);
		goto finish;
	}
	dout("do_request mds%d session %p state %s\n", mds, session,
	     session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_session = get_session(session);
	req->r_resend_mds = -1;  /* forget any previous mds hint */

	if (req->r_request_started == 0)  /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
out:
	return err;

finish:
	req->r_reply = ERR_PTR(err);
	complete_request(mdsc, req);
	goto out;
}

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req, *nreq;

	list_for_each_entry_safe(req, nreq, head, r_wait) {
		list_del_init(&req->r_wait);
		__do_request(mdsc, req);
	}
}

/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.  If @all is set,
 * wake up if their requests have been forwarded to @mds, too.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
{
	struct ceph_mds_request *reqs[10];
	u64 nexttid = 0;
	int i, got;

	dout("kick_requests mds%d\n", mds);
	while (nexttid <= mdsc->last_tid) {
		got = radix_tree_gang_lookup(&mdsc->request_tree,
					     (void **)&reqs, nexttid, 10);
		if (got == 0)
			break;
		nexttid = reqs[got-1]->r_tid + 1;
		for (i = 0; i < got; i++) {
			if (reqs[i]->r_got_unsafe)
				continue;
			if (reqs[i]->r_session &&
			    reqs[i]->r_session->s_mds == mds) {
				dout(" kicking tid %llu\n", reqs[i]->r_tid);
				put_request_session(reqs[i]);
				__do_request(mdsc, reqs[i]);
			}
		}
	}
}

void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}

/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry)
		ceph_get_cap_refs(
			ceph_inode(req->r_old_dentry->d_parent->d_inode),
			CEPH_CAP_PIN);

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	/* wait */
	if (!req->r_reply) {
		mutex_unlock(&mdsc->mutex);
		if (req->r_timeout) {
			err = wait_for_completion_timeout(&req->r_completion,
							  req->r_timeout);
			if (err > 0)
				err = 0;
			else if (err == 0)
				req->r_reply = ERR_PTR(-EIO);
		} else {
			wait_for_completion(&req->r_completion);
		}
		mutex_lock(&mdsc->mutex);
	}

	if (IS_ERR(req->r_reply)) {
		err = PTR_ERR(req->r_reply);
		req->r_reply = NULL;

		/* clean up */
		__unregister_request(mdsc, req);
		if (!list_empty(&req->r_unsafe_item))
			list_del_init(&req->r_unsafe_item);
		complete(&req->r_safe_completion);
	} else if (req->r_err) {
		err = req->r_err;
	} else {
		err = le32_to_cpu(req->r_reply_info.head->result);
	}
	mutex_unlock(&mdsc->mutex);

	dout("do_request %p done, result %d\n", req, err);
	return err;
}
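
/*
 * A typical caller looks roughly like this (a sketch, with assumed
 * field setup; see the dir and file code for real examples):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */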

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	u64 tid;
	int err, result;
	int mds;

	if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
		return;
	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(head->tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);
	mds = le64_to_cpu(msg->hdr.src.name.num);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warning("got a dup %s reply on %llu from mds%d\n",
			   head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Tolerate 2 consecutive ESTALEs from the same mds.
	 * FIXME: we should be looking at the cap migrate_seq.
	 */
	if (result == -ESTALE) {
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_num_stale++;
		if (req->r_num_stale <= 2) {
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_num_stale = 0;
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);
		complete(&req->r_safe_completion);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_tid(mdsc))
				complete(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	}

	BUG_ON(req->r_reply);

	if (!head->safe) {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
		goto out_err;
	}

	/* snap trace */
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
			       rinfo->snapblob + rinfo->snapblob_len,
			       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && rinfo->dir_nr)
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(&req->r_caps_reservation);
	}

	up_read(&mdsc->snap_rwsem);
out_err:
	if (err) {
		req->r_err = err;
	} else {
		req->r_reply = msg;
		ceph_msg_get(msg);
	}

	add_cap_releases(mdsc, req->r_session, -1);
	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}
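
/*
 * In short: an "unsafe" reply means the MDS has applied the request in
 * memory; the later "safe" reply means it has been journaled.  We
 * complete the caller on the first reply and keep the request on
 * s_unsafe until the safe one arrives, so it can be replayed if the
 * MDS restarts in between.
 */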



/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid;
	u32 next_mds;
	u32 fwd_seq;
	u8 must_resend;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	int from_mds;

	if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
		goto bad;
	from_mds = le64_to_cpu(msg->hdr.src.name.num);

	ceph_decode_need(&p, end, sizeof(u64) + 2*sizeof(u32) + sizeof(u8),
			 bad);
	tid = ceph_decode_64(&p);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);
	must_resend = ceph_decode_8(&p);

	WARN_ON(must_resend);  /* shouldn't happen. */

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("forward %llu dne\n", tid);
		goto out;  /* dup reply? */
	}

	if (fwd_seq <= req->r_num_fwd) {
		dout("forward %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
		return;
	mds = le64_to_cpu(msg->hdr.src.name.num);

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		unregister_session(mdsc, mds);
		remove_session_caps(session);
		wake = 1;  /* for good measure */
		complete(&mdsc->session_close_waiters);
		kick_requests(mdsc, mds, 0);  /* cur only */
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_cap_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = 0;
		spin_unlock(&session->s_cap_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	return;
}


/*
 * called under session->s_mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}
	mutex_unlock(&mdsc->mutex);
}
1935
1936/*
1937 * Encode information about a cap for a reconnect with the MDS.
1938 */
1939struct encode_caps_data {
1940 void **pp;
1941 void *end;
1942 int *num_caps;
1943};
1944
1945static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
1946 void *arg)
1947{
1948 struct ceph_mds_cap_reconnect *rec;
1949 struct ceph_inode_info *ci;
1950 struct encode_caps_data *data = (struct encode_caps_data *)arg;
1951 void *p = *(data->pp);
1952 void *end = data->end;
1953 char *path;
1954 int pathlen, err;
1955 u64 pathbase;
1956 struct dentry *dentry;
1957
1958 ci = cap->ci;
1959
1960 dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
1961 inode, ceph_vinop(inode), cap, cap->cap_id,
1962 ceph_cap_string(cap->issued));
1963 ceph_decode_need(&p, end, sizeof(u64), needmore);
1964 ceph_encode_64(&p, ceph_ino(inode));
1965
1966 dentry = d_find_alias(inode);
1967 if (dentry) {
1968 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
1969 if (IS_ERR(path)) {
1970 err = PTR_ERR(path);
1971 BUG_ON(err);
1972 }
1973 } else {
1974 path = NULL;
1975 pathlen = 0;
1976 }
1977 ceph_decode_need(&p, end, pathlen+4, needmore);
1978 ceph_encode_string(&p, end, path, pathlen);
1979
1980 ceph_decode_need(&p, end, sizeof(*rec), needmore);
1981 rec = p;
1982 p += sizeof(*rec);
1983 BUG_ON(p > end);
1984 spin_lock(&inode->i_lock);
1985 cap->seq = 0; /* reset cap seq */
1986 cap->issue_seq = 0; /* and issue_seq */
1987 rec->cap_id = cpu_to_le64(cap->cap_id);
1988 rec->pathbase = cpu_to_le64(pathbase);
1989 rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci));
1990 rec->issued = cpu_to_le32(cap->issued);
1991 rec->size = cpu_to_le64(inode->i_size);
1992 ceph_encode_timespec(&rec->mtime, &inode->i_mtime);
1993 ceph_encode_timespec(&rec->atime, &inode->i_atime);
1994 rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
1995 spin_unlock(&inode->i_lock);
1996
1997 kfree(path);
1998 dput(dentry);
1999 (*data->num_caps)++;
2000 *(data->pp) = p;
2001 return 0;
2002needmore:
2003 return -ENOSPC;
2004}
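/*
 * Sketch of what encode_caps_cb() appends for each cap (all
 * little-endian, in the order encoded above):
 *
 *	u64    ino
 *	u32    pathlen
 *	char   path[pathlen]              (not NUL-terminated)
 *	struct ceph_mds_cap_reconnect     (cap_id, pathbase, wanted,
 *					   issued, size, mtime, atime,
 *					   snaprealm)
 *
 * Returning -ENOSPC makes iterate_session_caps() bail out, so that
 * send_mds_reconnect() can retry with a larger buffer.
 */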
2005
2006
2007/*
2008 * If an MDS fails and recovers, clients need to reconnect in order to
2009 * reestablish shared state. This includes all caps issued through
2010 * this session _and_ the snap_realm hierarchy. Because it's not
2011 * clear which snap realms the mds cares about, we send everything we
2012 * know about; that ensures we'll then get any new info the
2013 * recovering MDS might have.
2014 *
2015 * This is a relatively heavyweight operation, but it's rare.
2016 *
2017 * called with mdsc->mutex held.
2018 */
2019static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
2020{
2021 struct ceph_mds_session *session;
2022 struct ceph_msg *reply;
2023 int newlen, len = 4 + 1;
2024 void *p, *end;
2025 int err;
2026 int num_caps, num_realms = 0;
2027 int got;
2028 u64 next_snap_ino = 0;
2029 __le32 *pnum_caps, *pnum_realms;
2030 struct encode_caps_data iter_args;
2031
2032 pr_info("reconnect to recovering mds%d\n", mds);
2033
2034 /* find session */
2035 session = __ceph_lookup_mds_session(mdsc, mds);
2036 mutex_unlock(&mdsc->mutex); /* drop lock for duration */
2037
2038 if (session) {
2039 mutex_lock(&session->s_mutex);
2040
2041 session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2042 session->s_seq = 0;
2043
2044 ceph_con_open(&session->s_con,
2045 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2046
2047 /* replay unsafe requests */
2048 replay_unsafe_requests(mdsc, session);
2049
2050 /* estimate needed space */
2051 len += session->s_nr_caps *
2052 (100+sizeof(struct ceph_mds_cap_reconnect));
2053		pr_info("estimating I need %d bytes for %d caps\n",
2054 len, session->s_nr_caps);
2055 } else {
2056 dout("no session for mds%d, will send short reconnect\n",
2057 mds);
2058 }
2059
2060 down_read(&mdsc->snap_rwsem);
2061
2062retry:
2063 /* build reply */
2064 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL);
2065 if (IS_ERR(reply)) {
2066 err = PTR_ERR(reply);
2067 pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n",
2068 len, mds);
2069 goto out;
2070 }
2071 p = reply->front.iov_base;
2072 end = p + len;
2073
2074 if (!session) {
2075 ceph_encode_8(&p, 1); /* session was closed */
2076 ceph_encode_32(&p, 0);
2077 goto send;
2078 }
2079 dout("session %p state %s\n", session,
2080 session_state_name(session->s_state));
2081
2082 /* traverse this session's caps */
2083 ceph_encode_8(&p, 0);
2084 pnum_caps = p;
2085 ceph_encode_32(&p, session->s_nr_caps);
2086 num_caps = 0;
2087
2088 iter_args.pp = &p;
2089 iter_args.end = end;
2090 iter_args.num_caps = &num_caps;
2091 err = iterate_session_caps(session, encode_caps_cb, &iter_args);
2092 if (err == -ENOSPC)
2093 goto needmore;
2094 if (err < 0)
2095 goto out;
2096 *pnum_caps = cpu_to_le32(num_caps);
2097
2098 /*
2099	 * snaprealms.  we provide the mds with the ino, seq (version), and
2100 * parent for all of our realms. If the mds has any newer info,
2101 * it will tell us.
2102 */
2103 next_snap_ino = 0;
2104 /* save some space for the snaprealm count */
2105 pnum_realms = p;
2106 ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore);
2107 p += sizeof(*pnum_realms);
2108 num_realms = 0;
2109 while (1) {
2110 struct ceph_snap_realm *realm;
2111 struct ceph_mds_snaprealm_reconnect *sr_rec;
2112 got = radix_tree_gang_lookup(&mdsc->snap_realms,
2113 (void **)&realm, next_snap_ino, 1);
2114 if (!got)
2115 break;
2116
2117 dout(" adding snap realm %llx seq %lld parent %llx\n",
2118 realm->ino, realm->seq, realm->parent_ino);
2119 ceph_decode_need(&p, end, sizeof(*sr_rec), needmore);
2120 sr_rec = p;
2121 sr_rec->ino = cpu_to_le64(realm->ino);
2122 sr_rec->seq = cpu_to_le64(realm->seq);
2123 sr_rec->parent = cpu_to_le64(realm->parent_ino);
2124 p += sizeof(*sr_rec);
2125 num_realms++;
2126 next_snap_ino = realm->ino + 1;
2127 }
2128 *pnum_realms = cpu_to_le32(num_realms);
2129
2130send:
2131 reply->front.iov_len = p - reply->front.iov_base;
2132 reply->hdr.front_len = cpu_to_le32(reply->front.iov_len);
2133 dout("final len was %u (guessed %d)\n",
2134 (unsigned)reply->front.iov_len, len);
2135	if (session) {
2136		ceph_con_send(&session->s_con, reply);
2137		session->s_state = CEPH_MDS_SESSION_OPEN;
2138		__wake_requests(mdsc, &session->s_waiting);
2139	} else {
2140		ceph_msg_put(reply);	/* no session; nowhere to send it */
2141	}
2142out:
2143 up_read(&mdsc->snap_rwsem);
2144 if (session) {
2145 mutex_unlock(&session->s_mutex);
2146 ceph_put_mds_session(session);
2147 }
2148 mutex_lock(&mdsc->mutex);
2149 return;
2150
2151needmore:
2152 /*
2153	 * we need a larger buffer.  this estimate doesn't factor in
2154	 * snap realms very accurately, but it errs large, so it's safe.
2155 */
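	/*
	 * For example, guessing len = 10000 and fitting num_caps = 400
	 * (caps plus realms) of s_nr_caps = 1000 gives
	 *
	 *	newlen = 10000 * ((100 * 1003) / 401) / 100
	 *	       = 10000 * 250 / 100 = 25000
	 *
	 * i.e. the buffer grows by roughly s_nr_caps / num_caps.
	 */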
2156 num_caps += num_realms;
2157 newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100;
2158	pr_info("I guessed %d, and did %d of %d caps, retrying with %d\n",
2159 len, num_caps, session->s_nr_caps, newlen);
2160 len = newlen;
2161 ceph_msg_put(reply);
2162 goto retry;
2163}
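/*
 * Overall shape of the CEPH_MSG_CLIENT_RECONNECT front assembled above,
 * for reference:
 *
 *	u8   closed                 1 if we had no session for this mds
 *	u32  num_caps
 *	     num_caps cap records              (see encode_caps_cb)
 *	u32  num_realms
 *	     num_realms snap realm records     { ino, seq, parent }
 */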
2164
2165
2166/*
2167 * compare old and new mdsmaps, kicking requests
2168 * and closing out old connections as necessary
2169 *
2170 * called under mdsc->mutex.
2171 */
2172static void check_new_map(struct ceph_mds_client *mdsc,
2173 struct ceph_mdsmap *newmap,
2174 struct ceph_mdsmap *oldmap)
2175{
2176 int i;
2177 int oldstate, newstate;
2178 struct ceph_mds_session *s;
2179
2180 dout("check_new_map new %u old %u\n",
2181 newmap->m_epoch, oldmap->m_epoch);
2182
2183 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
2184 if (mdsc->sessions[i] == NULL)
2185 continue;
2186 s = mdsc->sessions[i];
2187 oldstate = ceph_mdsmap_get_state(oldmap, i);
2188 newstate = ceph_mdsmap_get_state(newmap, i);
2189
2190 dout("check_new_map mds%d state %s -> %s (session %s)\n",
2191 i, ceph_mds_state_name(oldstate),
2192 ceph_mds_state_name(newstate),
2193 session_state_name(s->s_state));
2194
2195 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
2196 ceph_mdsmap_get_addr(newmap, i),
2197 sizeof(struct ceph_entity_addr))) {
2198 if (s->s_state == CEPH_MDS_SESSION_OPENING) {
2199 /* the session never opened, just close it
2200 * out now */
2201 __wake_requests(mdsc, &s->s_waiting);
2202 unregister_session(mdsc, i);
2203 } else {
2204 /* just close it */
2205 mutex_unlock(&mdsc->mutex);
2206 mutex_lock(&s->s_mutex);
2207 mutex_lock(&mdsc->mutex);
2208 ceph_con_close(&s->s_con);
2209 mutex_unlock(&s->s_mutex);
2210 s->s_state = CEPH_MDS_SESSION_RESTARTING;
2211 }
2212
2213 /* kick any requests waiting on the recovering mds */
2214 kick_requests(mdsc, i, 1);
2215 } else if (oldstate == newstate) {
2216 continue; /* nothing new with this mds */
2217 }
2218
2219 /*
2220 * send reconnect?
2221 */
2222 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
2223 newstate >= CEPH_MDS_STATE_RECONNECT)
2224 send_mds_reconnect(mdsc, i);
2225
2226 /*
2227 * kick requests on any mds that has gone active.
2228 *
2229 * kick requests on cur or forwarder: we may have sent
2230 * the request to mds1, mds1 told us it forwarded it
2231 * to mds2, but then we learn mds1 failed and can't be
2232 * sure it successfully forwarded our request before
2233 * it died.
2234 */
2235 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
2236 newstate >= CEPH_MDS_STATE_ACTIVE) {
2237 kick_requests(mdsc, i, 1);
2238 ceph_kick_flushing_caps(mdsc, s);
2239 }
2240 }
2241}
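/*
 * To summarize the transitions handled above:
 *
 *	mds addr changed, session OPENING    -> wake waiters, unregister
 *	mds addr changed, session otherwise  -> close con, RESTARTING,
 *						kick requests
 *	session RESTARTING, mds >= RECONNECT -> send_mds_reconnect()
 *	mds went from < ACTIVE to >= ACTIVE  -> kick requests, kick
 *						flushing caps
 */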
2242
2243
2244
2245/*
2246 * leases
2247 */
2248
2249/*
2250 * caller must hold session s_mutex, dentry->d_lock
2251 */
2252void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
2253{
2254 struct ceph_dentry_info *di = ceph_dentry(dentry);
2255
2256 ceph_put_mds_session(di->lease_session);
2257 di->lease_session = NULL;
2258}
2259
2260static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
2261{
2262 struct super_block *sb = mdsc->client->sb;
2263 struct inode *inode;
2264 struct ceph_mds_session *session;
2265 struct ceph_inode_info *ci;
2266 struct dentry *parent, *dentry;
2267 struct ceph_dentry_info *di;
2268 int mds;
2269 struct ceph_mds_lease *h = msg->front.iov_base;
2270 struct ceph_vino vino;
2271 int mask;
2272 struct qstr dname;
2273 int release = 0;
2274
2275 if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
2276 return;
2277 mds = le64_to_cpu(msg->hdr.src.name.num);
2278 dout("handle_lease from mds%d\n", mds);
2279
2280 /* decode */
2281 if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
2282 goto bad;
2283 vino.ino = le64_to_cpu(h->ino);
2284 vino.snap = CEPH_NOSNAP;
2285 mask = le16_to_cpu(h->mask);
2286 dname.name = (void *)h + sizeof(*h) + sizeof(u32);
2287 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
2288 if (dname.len != get_unaligned_le32(h+1))
2289 goto bad;
2290
2291 /* find session */
2292 mutex_lock(&mdsc->mutex);
2293 session = __ceph_lookup_mds_session(mdsc, mds);
2294 mutex_unlock(&mdsc->mutex);
2295 if (!session) {
2296 pr_err("handle_lease got lease but no session mds%d\n", mds);
2297 return;
2298 }
2299
2300 mutex_lock(&session->s_mutex);
2301 session->s_seq++;
2302
2303 /* lookup inode */
2304 inode = ceph_find_inode(sb, vino);
2305 dout("handle_lease '%s', mask %d, ino %llx %p\n",
2306 ceph_lease_op_name(h->action), mask, vino.ino, inode);
2307 if (inode == NULL) {
2308 dout("handle_lease no inode %llx\n", vino.ino);
2309 goto release;
2310 }
2311 ci = ceph_inode(inode);
2312
2313 /* dentry */
2314 parent = d_find_alias(inode);
2315 if (!parent) {
2316 dout("no parent dentry on inode %p\n", inode);
2317 WARN_ON(1);
2318 goto release; /* hrm... */
2319 }
2320 dname.hash = full_name_hash(dname.name, dname.len);
2321 dentry = d_lookup(parent, &dname);
2322 dput(parent);
2323 if (!dentry)
2324 goto release;
2325
2326 spin_lock(&dentry->d_lock);
2327 di = ceph_dentry(dentry);
2328 switch (h->action) {
2329 case CEPH_MDS_LEASE_REVOKE:
2330 if (di && di->lease_session == session) {
2331 h->seq = cpu_to_le32(di->lease_seq);
2332 __ceph_mdsc_drop_dentry_lease(dentry);
2333 }
2334 release = 1;
2335 break;
2336
2337 case CEPH_MDS_LEASE_RENEW:
2338 if (di && di->lease_session == session &&
2339 di->lease_gen == session->s_cap_gen &&
2340 di->lease_renew_from &&
2341 di->lease_renew_after == 0) {
2342 unsigned long duration =
2343 le32_to_cpu(h->duration_ms) * HZ / 1000;
2344
2345 di->lease_seq = le32_to_cpu(h->seq);
2346 dentry->d_time = di->lease_renew_from + duration;
2347 di->lease_renew_after = di->lease_renew_from +
2348 (duration >> 1);
2349 di->lease_renew_from = 0;
2350 }
2351 break;
2352 }
2353 spin_unlock(&dentry->d_lock);
2354 dput(dentry);
2355
2356 if (!release)
2357 goto out;
2358
2359release:
2360 /* let's just reuse the same message */
2361 h->action = CEPH_MDS_LEASE_REVOKE_ACK;
2362 ceph_msg_get(msg);
2363 ceph_con_send(&session->s_con, msg);
2364
2365out:
2366 iput(inode);
2367 mutex_unlock(&session->s_mutex);
2368 ceph_put_mds_session(session);
2369 return;
2370
2371bad:
2372 pr_err("corrupt lease message\n");
2373}
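/*
 * Renewal timing in the LEASE_RENEW case above, as a worked example:
 * with duration_ms = 30000 and HZ = 1000, duration is 30000 jiffies,
 * so
 *
 *	dentry->d_time        = lease_renew_from + 30000
 *	di->lease_renew_after = lease_renew_from + 15000
 *
 * i.e. the lease runs its full term, but we won't attempt another
 * renewal until half of it has elapsed.
 */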
2374
2375void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
2376 struct inode *inode,
2377 struct dentry *dentry, char action,
2378 u32 seq)
2379{
2380 struct ceph_msg *msg;
2381 struct ceph_mds_lease *lease;
2382 int len = sizeof(*lease) + sizeof(u32);
2383 int dnamelen = 0;
2384
2385 dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
2386 inode, dentry, ceph_lease_op_name(action), session->s_mds);
2387 dnamelen = dentry->d_name.len;
2388 len += dnamelen;
2389
2390 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
2391 if (IS_ERR(msg))
2392 return;
2393 lease = msg->front.iov_base;
2394 lease->action = action;
2395 lease->mask = cpu_to_le16(CEPH_LOCK_DN);
2396 lease->ino = cpu_to_le64(ceph_vino(inode).ino);
2397 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
2398 lease->seq = cpu_to_le32(seq);
2399 put_unaligned_le32(dnamelen, lease + 1);
2400 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
2401
2402 /*
2403 * if this is a preemptive lease RELEASE, no need to
2404 * flush request stream, since the actual request will
2405 * soon follow.
2406 */
2407 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
2408
2409 ceph_con_send(&session->s_con, msg);
2410}
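/*
 * The CEPH_MSG_CLIENT_LEASE front built above is, roughly:
 *
 *	struct ceph_mds_lease  (action, mask, ino, first, last, seq,
 *				duration_ms)
 *	u32   dname_len
 *	char  dname[dname_len]         (not NUL-terminated)
 *
 * which matches what handle_lease() decodes on receive.
 */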
2411
2412/*
2413 * Preemptively release a lease we expect to invalidate anyway.
2414 * Both @inode and @dentry are required; mask must be CEPH_LOCK_DN.
2415 */
2416void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
2417 struct dentry *dentry, int mask)
2418{
2419 struct ceph_dentry_info *di;
2420 struct ceph_mds_session *session;
2421 u32 seq;
2422
2423 BUG_ON(inode == NULL);
2424 BUG_ON(dentry == NULL);
2425 BUG_ON(mask != CEPH_LOCK_DN);
2426
2427 /* is dentry lease valid? */
2428 spin_lock(&dentry->d_lock);
2429 di = ceph_dentry(dentry);
2430 if (!di || !di->lease_session ||
2431 di->lease_session->s_mds < 0 ||
2432 di->lease_gen != di->lease_session->s_cap_gen ||
2433 !time_before(jiffies, dentry->d_time)) {
2434 dout("lease_release inode %p dentry %p -- "
2435 "no lease on %d\n",
2436 inode, dentry, mask);
2437 spin_unlock(&dentry->d_lock);
2438 return;
2439 }
2440
2441 /* we do have a lease on this dentry; note mds and seq */
2442 session = ceph_get_mds_session(di->lease_session);
2443 seq = di->lease_seq;
2444 __ceph_mdsc_drop_dentry_lease(dentry);
2445 spin_unlock(&dentry->d_lock);
2446
2447 dout("lease_release inode %p dentry %p mask %d to mds%d\n",
2448 inode, dentry, mask, session->s_mds);
2449 ceph_mdsc_lease_send_msg(session, inode, dentry,
2450 CEPH_MDS_LEASE_RELEASE, seq);
2451 ceph_put_mds_session(session);
2452}
2453
2454/*
2455 * drop all leases (and dentry refs) in preparation for umount
2456 */
2457static void drop_leases(struct ceph_mds_client *mdsc)
2458{
2459 int i;
2460
2461 dout("drop_leases\n");
2462 mutex_lock(&mdsc->mutex);
2463 for (i = 0; i < mdsc->max_sessions; i++) {
2464 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2465 if (!s)
2466 continue;
2467 mutex_unlock(&mdsc->mutex);
2468		mutex_lock(&s->s_mutex);	/* briefly cycle s_mutex to */
2469		mutex_unlock(&s->s_mutex);	/* sync with lease handlers */
2470 ceph_put_mds_session(s);
2471 mutex_lock(&mdsc->mutex);
2472 }
2473 mutex_unlock(&mdsc->mutex);
2474}
2475
2476
2477
2478/*
2479 * delayed work -- periodically trim expired leases, renew caps with mds
2480 */
2481static void schedule_delayed(struct ceph_mds_client *mdsc)
2482{
2483 int delay = 5;
2484 unsigned hz = round_jiffies_relative(HZ * delay);
2485 schedule_delayed_work(&mdsc->delayed_work, hz);
2486}
2487
2488static void delayed_work(struct work_struct *work)
2489{
2490 int i;
2491 struct ceph_mds_client *mdsc =
2492 container_of(work, struct ceph_mds_client, delayed_work.work);
2493 int renew_interval;
2494 int renew_caps;
2495
2496 dout("mdsc delayed_work\n");
2497	ceph_check_delayed_caps(mdsc);
2498
2499 mutex_lock(&mdsc->mutex);
2500 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
2501 renew_caps = time_after_eq(jiffies, HZ*renew_interval +
2502 mdsc->last_renew_caps);
2503 if (renew_caps)
2504 mdsc->last_renew_caps = jiffies;
2505
2506 for (i = 0; i < mdsc->max_sessions; i++) {
2507 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2508 if (s == NULL)
2509 continue;
2510 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
2511 dout("resending session close request for mds%d\n",
2512 s->s_mds);
2513 request_close_session(mdsc, s);
2514 ceph_put_mds_session(s);
2515 continue;
2516 }
2517 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
2518 if (s->s_state == CEPH_MDS_SESSION_OPEN) {
2519 s->s_state = CEPH_MDS_SESSION_HUNG;
2520 pr_info("mds%d hung\n", s->s_mds);
2521 }
2522 }
2523 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
2524			/* this mds has failed or is recovering; just wait */
2525 ceph_put_mds_session(s);
2526 continue;
2527 }
2528 mutex_unlock(&mdsc->mutex);
2529
2530 mutex_lock(&s->s_mutex);
2531 if (renew_caps)
2532 send_renew_caps(mdsc, s);
2533 else
2534 ceph_con_keepalive(&s->s_con);
2535 add_cap_releases(mdsc, s, -1);
2536 send_cap_releases(mdsc, s);
2537 mutex_unlock(&s->s_mutex);
2538 ceph_put_mds_session(s);
2539
2540 mutex_lock(&mdsc->mutex);
2541 }
2542 mutex_unlock(&mdsc->mutex);
2543
2544 schedule_delayed(mdsc);
2545}
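/*
 * Timing sketch: delayed_work runs on a ~5 second tick (see
 * schedule_delayed()).  With m_session_timeout = 60, for example, caps
 * are renewed once
 *
 *	jiffies >= last_renew_caps + HZ * (60 >> 2)
 *
 * i.e. about every 15 seconds, a quarter of the session timeout.
 */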
2546
2547
2548void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2549{
2550 mdsc->client = client;
2551 mutex_init(&mdsc->mutex);
2552 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2553 init_completion(&mdsc->safe_umount_waiters);
2554 init_completion(&mdsc->session_close_waiters);
2555 INIT_LIST_HEAD(&mdsc->waiting_for_map);
2556 mdsc->sessions = NULL;
2557 mdsc->max_sessions = 0;
2558 mdsc->stopping = 0;
2559 init_rwsem(&mdsc->snap_rwsem);
2560 INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
2561 INIT_LIST_HEAD(&mdsc->snap_empty);
2562 spin_lock_init(&mdsc->snap_empty_lock);
2563 mdsc->last_tid = 0;
2564 INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
2565 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
2566 mdsc->last_renew_caps = jiffies;
2567 INIT_LIST_HEAD(&mdsc->cap_delay_list);
2568 spin_lock_init(&mdsc->cap_delay_lock);
2569 INIT_LIST_HEAD(&mdsc->snap_flush_list);
2570 spin_lock_init(&mdsc->snap_flush_lock);
2571 mdsc->cap_flush_seq = 0;
2572 INIT_LIST_HEAD(&mdsc->cap_dirty);
2573 mdsc->num_cap_flushing = 0;
2574 spin_lock_init(&mdsc->cap_dirty_lock);
2575 init_waitqueue_head(&mdsc->cap_flushing_wq);
2576 spin_lock_init(&mdsc->dentry_lru_lock);
2577 INIT_LIST_HEAD(&mdsc->dentry_lru);
2578}
2579
2580/*
2581 * Wait for safe replies on open mds requests. If we time out, drop
2582 * all requests from the tree to avoid dangling dentry refs.
2583 */
2584static void wait_requests(struct ceph_mds_client *mdsc)
2585{
2586 struct ceph_mds_request *req;
2587 struct ceph_client *client = mdsc->client;
2588
2589 mutex_lock(&mdsc->mutex);
2590 if (__get_oldest_tid(mdsc)) {
2591 mutex_unlock(&mdsc->mutex);
2592 dout("wait_requests waiting for requests\n");
2593 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
2594				    client->mount_args->mount_timeout * HZ);
2595 mutex_lock(&mdsc->mutex);
2596
2597 /* tear down remaining requests */
2598 while (radix_tree_gang_lookup(&mdsc->request_tree,
2599 (void **)&req, 0, 1)) {
2600 dout("wait_requests timed out on tid %llu\n",
2601 req->r_tid);
2602 radix_tree_delete(&mdsc->request_tree, req->r_tid);
2603 ceph_mdsc_put_request(req);
2604 }
2605 }
2606 mutex_unlock(&mdsc->mutex);
2607 dout("wait_requests done\n");
2608}
2609
2610/*
2611 * called before mount is ro, and before dentries are torn down.
2612 * (hmm, does this still race with new lookups?)
2613 */
2614void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
2615{
2616 dout("pre_umount\n");
2617 mdsc->stopping = 1;
2618
2619 drop_leases(mdsc);
2620	ceph_flush_dirty_caps(mdsc);
2621 wait_requests(mdsc);
2622}
2623
2624/*
2625 * wait for all write mds requests to flush.
2626 */
2627static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
2628{
2629 struct ceph_mds_request *req;
2630 u64 next_tid = 0;
2631 int got;
2632
2633 mutex_lock(&mdsc->mutex);
2634 dout("wait_unsafe_requests want %lld\n", want_tid);
2635 while (1) {
2636 got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
2637 next_tid, 1);
2638 if (!got)
2639 break;
2640 if (req->r_tid > want_tid)
2641 break;
2642
2643 next_tid = req->r_tid + 1;
2644 if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
2645 continue; /* not a write op */
2646
2647 ceph_mdsc_get_request(req);
2648 mutex_unlock(&mdsc->mutex);
2649 dout("wait_unsafe_requests wait on %llu (want %llu)\n",
2650 req->r_tid, want_tid);
2651 wait_for_completion(&req->r_safe_completion);
2652 mutex_lock(&mdsc->mutex);
2653 ceph_mdsc_put_request(req);
2654 }
2655 mutex_unlock(&mdsc->mutex);
2656 dout("wait_unsafe_requests done\n");
2657}
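/*
 * The loop above is the radix tree iteration idiom used throughout
 * this file: look up a single entry at or after next_tid, then bump
 * next_tid past it:
 *
 *	got = radix_tree_gang_lookup(&mdsc->request_tree,
 *				     (void **)&req, next_tid, 1);
 *	...
 *	next_tid = req->r_tid + 1;
 *
 * which stays safe even though mdsc->mutex is dropped while waiting
 * and entries may come and go in between.
 */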
2658
2659void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
2660{
2661 u64 want_tid, want_flush;
2662
2663 dout("sync\n");
2664 mutex_lock(&mdsc->mutex);
2665 want_tid = mdsc->last_tid;
2666 want_flush = mdsc->cap_flush_seq;
2667 mutex_unlock(&mdsc->mutex);
2668 dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
2669
2670	ceph_flush_dirty_caps(mdsc);
2671
2672 wait_unsafe_requests(mdsc, want_tid);
2673 wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
2674}
2675
2676
2677/*
2678 * called after sb is ro.
2679 */
2680void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
2681{
2682 struct ceph_mds_session *session;
2683 int i;
2684 int n;
2685 struct ceph_client *client = mdsc->client;
2686	unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
2687
2688 dout("close_sessions\n");
2689
2690 mutex_lock(&mdsc->mutex);
2691
2692 /* close sessions */
2693 started = jiffies;
2694 while (time_before(jiffies, started + timeout)) {
2695 dout("closing sessions\n");
2696 n = 0;
2697 for (i = 0; i < mdsc->max_sessions; i++) {
2698 session = __ceph_lookup_mds_session(mdsc, i);
2699 if (!session)
2700 continue;
2701 mutex_unlock(&mdsc->mutex);
2702 mutex_lock(&session->s_mutex);
2703 __close_session(mdsc, session);
2704 mutex_unlock(&session->s_mutex);
2705 ceph_put_mds_session(session);
2706 mutex_lock(&mdsc->mutex);
2707 n++;
2708 }
2709 if (n == 0)
2710 break;
2711
2712 if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
2713 break;
2714
2715 dout("waiting for sessions to close\n");
2716 mutex_unlock(&mdsc->mutex);
2717 wait_for_completion_timeout(&mdsc->session_close_waiters,
2718 timeout);
2719 mutex_lock(&mdsc->mutex);
2720 }
2721
2722 /* tear down remaining sessions */
2723 for (i = 0; i < mdsc->max_sessions; i++) {
2724 if (mdsc->sessions[i]) {
2725 session = get_session(mdsc->sessions[i]);
2726 unregister_session(mdsc, i);
2727 mutex_unlock(&mdsc->mutex);
2728 mutex_lock(&session->s_mutex);
2729 remove_session_caps(session);
2730 mutex_unlock(&session->s_mutex);
2731 ceph_put_mds_session(session);
2732 mutex_lock(&mdsc->mutex);
2733 }
2734 }
2735
2736 WARN_ON(!list_empty(&mdsc->cap_delay_list));
2737
2738 mutex_unlock(&mdsc->mutex);
2739
2740 ceph_cleanup_empty_realms(mdsc);
2741
2742 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
2743
2744 dout("stopped\n");
2745}
2746
2747void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
2748{
2749 dout("stop\n");
2750 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
2751 if (mdsc->mdsmap)
2752 ceph_mdsmap_destroy(mdsc->mdsmap);
2753 kfree(mdsc->sessions);
2754}
2755
2756
2757/*
2758 * handle mds map update.
2759 */
2760void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
2761{
2762 u32 epoch;
2763 u32 maplen;
2764 void *p = msg->front.iov_base;
2765 void *end = p + msg->front.iov_len;
2766 struct ceph_mdsmap *newmap, *oldmap;
2767 struct ceph_fsid fsid;
2768 int err = -EINVAL;
2769
2770 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
2771 ceph_decode_copy(&p, &fsid, sizeof(fsid));
2772 if (ceph_fsid_compare(&fsid, &mdsc->client->monc.monmap->fsid)) {
2773 pr_err("got mdsmap with wrong fsid\n");
2774 return;
2775 }
2776 epoch = ceph_decode_32(&p);
2777 maplen = ceph_decode_32(&p);
2778 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
2779
2780 /* do we need it? */
2781 ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
2782 mutex_lock(&mdsc->mutex);
2783 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
2784 dout("handle_map epoch %u <= our %u\n",
2785 epoch, mdsc->mdsmap->m_epoch);
2786 mutex_unlock(&mdsc->mutex);
2787 return;
2788 }
2789
2790 newmap = ceph_mdsmap_decode(&p, end);
2791 if (IS_ERR(newmap)) {
2792 err = PTR_ERR(newmap);
2793 goto bad_unlock;
2794 }
2795
2796 /* swap into place */
2797 if (mdsc->mdsmap) {
2798 oldmap = mdsc->mdsmap;
2799 mdsc->mdsmap = newmap;
2800 check_new_map(mdsc, newmap, oldmap);
2801 ceph_mdsmap_destroy(oldmap);
2802 } else {
2803 mdsc->mdsmap = newmap; /* first mds map */
2804 }
2805 mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
2806
2807 __wake_requests(mdsc, &mdsc->waiting_for_map);
2808
2809 mutex_unlock(&mdsc->mutex);
2810 schedule_delayed(mdsc);
2811 return;
2812
2813bad_unlock:
2814 mutex_unlock(&mdsc->mutex);
2815bad:
2816 pr_err("error decoding mdsmap %d\n", err);
2817 return;
2818}
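/*
 * For reference, the mdsmap message decoded above begins:
 *
 *	ceph_fsid  fsid
 *	u32        epoch
 *	u32        maplen
 *	...        map payload         (see ceph_mdsmap_decode)
 *
 * Maps with an epoch <= the one we already hold are ignored.
 */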
2819
2820static struct ceph_connection *con_get(struct ceph_connection *con)
2821{
2822 struct ceph_mds_session *s = con->private;
2823
2824 if (get_session(s)) {
2825 dout("mdsc con_get %p %d -> %d\n", s,
2826 atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
2827 return con;
2828 }
2829 dout("mdsc con_get %p FAIL\n", s);
2830 return NULL;
2831}
2832
2833static void con_put(struct ceph_connection *con)
2834{
2835 struct ceph_mds_session *s = con->private;
2836
2837 dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
2838 atomic_read(&s->s_ref) - 1);
2839 ceph_put_mds_session(s);
2840}
2841
2842/*
2843 * if the client is unresponsive for long enough, the mds will kill
2844 * the session entirely.
2845 */
2846static void peer_reset(struct ceph_connection *con)
2847{
2848 struct ceph_mds_session *s = con->private;
2849
2850 pr_err("mds%d gave us the boot. IMPLEMENT RECONNECT.\n",
2851 s->s_mds);
2852}
2853
2854static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2855{
2856 struct ceph_mds_session *s = con->private;
2857 struct ceph_mds_client *mdsc = s->s_mdsc;
2858 int type = le16_to_cpu(msg->hdr.type);
2859
2860 switch (type) {
2861 case CEPH_MSG_MDS_MAP:
2862 ceph_mdsc_handle_map(mdsc, msg);
2863 break;
2864 case CEPH_MSG_CLIENT_SESSION:
2865 handle_session(s, msg);
2866 break;
2867 case CEPH_MSG_CLIENT_REPLY:
2868 handle_reply(s, msg);
2869 break;
2870 case CEPH_MSG_CLIENT_REQUEST_FORWARD:
2871 handle_forward(mdsc, msg);
2872 break;
2873 case CEPH_MSG_CLIENT_CAPS:
2874 ceph_handle_caps(s, msg);
2875 break;
2876 case CEPH_MSG_CLIENT_SNAP:
2877 ceph_handle_snap(mdsc, msg);
2878 break;
2879 case CEPH_MSG_CLIENT_LEASE:
2880 handle_lease(mdsc, msg);
2881 break;
2882
2883 default:
2884 pr_err("received unknown message type %d %s\n", type,
2885 ceph_msg_type_name(type));
2886 }
2887 ceph_msg_put(msg);
2888}
2889
2890static const struct ceph_connection_operations mds_con_ops = {
2891 .get = con_get,
2892 .put = con_put,
2893 .dispatch = dispatch,
2894 .peer_reset = peer_reset,
2895 .alloc_msg = ceph_alloc_msg,
2896 .alloc_middle = ceph_alloc_middle,
2897};
2898
2899
2900
2901
2902/* eof */