fs/ceph/mds_client.c
1#include "ceph_debug.h"
2
3#include <linux/wait.h>
4#include <linux/sched.h>
5
6#include "mds_client.h"
7#include "mon_client.h"
8#include "super.h"
9#include "messenger.h"
10#include "decode.h"
 11#include "auth.h"
 12#include "pagelist.h"
13
14/*
15 * A cluster of MDS (metadata server) daemons is responsible for
16 * managing the file system namespace (the directory hierarchy and
17 * inodes) and for coordinating shared access to storage. Metadata is
 18 * partitioned hierarchically across a number of servers, and that
19 * partition varies over time as the cluster adjusts the distribution
20 * in order to balance load.
21 *
 22 * The MDS client is primarily responsible for managing synchronous
23 * metadata requests for operations like open, unlink, and so forth.
 24 * If there is an MDS failure, we find out about it when we (possibly
25 * request and) receive a new MDS map, and can resubmit affected
26 * requests.
27 *
28 * For the most part, though, we take advantage of a lossless
29 * communications channel to the MDS, and do not need to worry about
30 * timing out or resubmitting requests.
31 *
32 * We maintain a stateful "session" with each MDS we interact with.
 33 * Within each session, we send periodic heartbeat messages to ensure
 34 * any capabilities or leases we have been issued remain valid. If
35 * the session times out and goes stale, our leases and capabilities
36 * are no longer valid.
37 */
38
39static void __wake_requests(struct ceph_mds_client *mdsc,
40 struct list_head *head);
41
 42static const struct ceph_connection_operations mds_con_ops;
43
44
45/*
46 * mds reply parsing
47 */
48
49/*
50 * parse individual inode info
51 */
52static int parse_reply_info_in(void **p, void *end,
53 struct ceph_mds_reply_info_in *info)
54{
55 int err = -EIO;
56
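 /*
 * The encoded inode is a fixed-size ceph_mds_reply_inode followed by
 * nsplits fragtree split entries, then a length-prefixed symlink
 * target and a length-prefixed xattr blob; we only record pointers
 * into the message and advance *p past each piece.
 */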
57 info->in = *p;
58 *p += sizeof(struct ceph_mds_reply_inode) +
59 sizeof(*info->in->fragtree.splits) *
60 le32_to_cpu(info->in->fragtree.nsplits);
61
62 ceph_decode_32_safe(p, end, info->symlink_len, bad);
63 ceph_decode_need(p, end, info->symlink_len, bad);
64 info->symlink = *p;
65 *p += info->symlink_len;
66
67 ceph_decode_32_safe(p, end, info->xattr_len, bad);
68 ceph_decode_need(p, end, info->xattr_len, bad);
69 info->xattr_data = *p;
70 *p += info->xattr_len;
71 return 0;
72bad:
73 return err;
74}
75
76/*
77 * parse a normal reply, which may contain a (dir+)dentry and/or a
78 * target inode.
79 */
80static int parse_reply_info_trace(void **p, void *end,
81 struct ceph_mds_reply_info_parsed *info)
82{
83 int err;
84
85 if (info->head->is_dentry) {
86 err = parse_reply_info_in(p, end, &info->diri);
87 if (err < 0)
88 goto out_bad;
89
90 if (unlikely(*p + sizeof(*info->dirfrag) > end))
91 goto bad;
92 info->dirfrag = *p;
93 *p += sizeof(*info->dirfrag) +
94 sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
95 if (unlikely(*p > end))
96 goto bad;
97
98 ceph_decode_32_safe(p, end, info->dname_len, bad);
99 ceph_decode_need(p, end, info->dname_len, bad);
100 info->dname = *p;
101 *p += info->dname_len;
102 info->dlease = *p;
103 *p += sizeof(*info->dlease);
104 }
105
106 if (info->head->is_target) {
107 err = parse_reply_info_in(p, end, &info->targeti);
108 if (err < 0)
109 goto out_bad;
110 }
111
112 if (unlikely(*p != end))
113 goto bad;
114 return 0;
115
116bad:
117 err = -EIO;
118out_bad:
119 pr_err("problem parsing mds trace %d\n", err);
120 return err;
121}
122
123/*
124 * parse readdir results
125 */
126static int parse_reply_info_dir(void **p, void *end,
127 struct ceph_mds_reply_info_parsed *info)
128{
129 u32 num, i = 0;
130 int err;
131
132 info->dir_dir = *p;
133 if (*p + sizeof(*info->dir_dir) > end)
134 goto bad;
135 *p += sizeof(*info->dir_dir) +
136 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
137 if (*p > end)
138 goto bad;
139
140 ceph_decode_need(p, end, sizeof(num) + 2, bad);
141 num = ceph_decode_32(p);
142 info->dir_end = ceph_decode_8(p);
143 info->dir_complete = ceph_decode_8(p);
144 if (num == 0)
145 goto done;
146
147 /* alloc large array */
148 info->dir_nr = num;
149 info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
150 sizeof(*info->dir_dname) +
151 sizeof(*info->dir_dname_len) +
152 sizeof(*info->dir_dlease),
153 GFP_NOFS);
154 if (info->dir_in == NULL) {
155 err = -ENOMEM;
156 goto out_bad;
157 }
158 info->dir_dname = (void *)(info->dir_in + num);
159 info->dir_dname_len = (void *)(info->dir_dname + num);
160 info->dir_dlease = (void *)(info->dir_dname_len + num);
161
162 while (num) {
163 /* dentry */
164 ceph_decode_need(p, end, sizeof(u32)*2, bad);
 165 info->dir_dname_len[i] = ceph_decode_32(p);
166 ceph_decode_need(p, end, info->dir_dname_len[i], bad);
167 info->dir_dname[i] = *p;
168 *p += info->dir_dname_len[i];
169 dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
170 info->dir_dname[i]);
171 info->dir_dlease[i] = *p;
172 *p += sizeof(struct ceph_mds_reply_lease);
173
174 /* inode */
175 err = parse_reply_info_in(p, end, &info->dir_in[i]);
176 if (err < 0)
177 goto out_bad;
178 i++;
179 num--;
180 }
181
182done:
183 if (*p != end)
184 goto bad;
185 return 0;
186
187bad:
188 err = -EIO;
189out_bad:
190 pr_err("problem parsing dir contents %d\n", err);
191 return err;
192}
193
194/*
195 * parse entire mds reply
196 */
197static int parse_reply_info(struct ceph_msg *msg,
198 struct ceph_mds_reply_info_parsed *info)
199{
200 void *p, *end;
201 u32 len;
202 int err;
203
204 info->head = msg->front.iov_base;
205 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
206 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
207
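 /*
 * After the fixed reply head, the body is three length-prefixed
 * sections: the trace (dentry/inode metadata), the readdir contents,
 * and the snap blob, in that order.
 */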
208 /* trace */
209 ceph_decode_32_safe(&p, end, len, bad);
210 if (len > 0) {
211 err = parse_reply_info_trace(&p, p+len, info);
212 if (err < 0)
213 goto out_bad;
214 }
215
216 /* dir content */
217 ceph_decode_32_safe(&p, end, len, bad);
218 if (len > 0) {
219 err = parse_reply_info_dir(&p, p+len, info);
220 if (err < 0)
221 goto out_bad;
222 }
223
224 /* snap blob */
225 ceph_decode_32_safe(&p, end, len, bad);
226 info->snapblob_len = len;
227 info->snapblob = p;
228 p += len;
229
230 if (p != end)
231 goto bad;
232 return 0;
233
234bad:
235 err = -EIO;
236out_bad:
237 pr_err("mds parse_reply err %d\n", err);
238 return err;
239}
240
241static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
242{
243 kfree(info->dir_in);
244}
245
246
247/*
248 * sessions
249 */
250static const char *session_state_name(int s)
251{
252 switch (s) {
253 case CEPH_MDS_SESSION_NEW: return "new";
254 case CEPH_MDS_SESSION_OPENING: return "opening";
255 case CEPH_MDS_SESSION_OPEN: return "open";
256 case CEPH_MDS_SESSION_HUNG: return "hung";
257 case CEPH_MDS_SESSION_CLOSING: return "closing";
258 case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
259 default: return "???";
260 }
261}
262
263static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
264{
265 if (atomic_inc_not_zero(&s->s_ref)) {
266 dout("mdsc get_session %p %d -> %d\n", s,
267 atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
268 return s;
269 } else {
270 dout("mdsc get_session %p 0 -- FAIL", s);
271 return NULL;
272 }
273}
274
275void ceph_put_mds_session(struct ceph_mds_session *s)
276{
277 dout("mdsc put_session %p %d -> %d\n", s,
278 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
279 if (atomic_dec_and_test(&s->s_ref)) {
280 if (s->s_authorizer)
281 s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
282 s->s_mdsc->client->monc.auth, s->s_authorizer);
 283 kfree(s);
 284 }
285}
286
287/*
288 * called under mdsc->mutex
289 */
290struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
291 int mds)
292{
293 struct ceph_mds_session *session;
294
295 if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
296 return NULL;
297 session = mdsc->sessions[mds];
298 dout("lookup_mds_session %p %d\n", session,
299 atomic_read(&session->s_ref));
300 get_session(session);
301 return session;
302}
303
304static bool __have_session(struct ceph_mds_client *mdsc, int mds)
305{
306 if (mds >= mdsc->max_sessions)
307 return false;
308 return mdsc->sessions[mds];
309}
310
311/*
312 * create+register a new session for given mds.
313 * called under mdsc->mutex.
314 */
315static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
316 int mds)
317{
318 struct ceph_mds_session *s;
319
 320 s = kzalloc(sizeof(*s), GFP_NOFS);
 if (!s)
 return ERR_PTR(-ENOMEM);
321 s->s_mdsc = mdsc;
322 s->s_mds = mds;
323 s->s_state = CEPH_MDS_SESSION_NEW;
324 s->s_ttl = 0;
325 s->s_seq = 0;
326 mutex_init(&s->s_mutex);
327
328 ceph_con_init(mdsc->client->msgr, &s->s_con);
329 s->s_con.private = s;
330 s->s_con.ops = &mds_con_ops;
331 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
332 s->s_con.peer_name.num = cpu_to_le64(mds);
333
334 spin_lock_init(&s->s_cap_lock);
335 s->s_cap_gen = 0;
336 s->s_cap_ttl = 0;
337 s->s_renew_requested = 0;
338 s->s_renew_seq = 0;
339 INIT_LIST_HEAD(&s->s_caps);
340 s->s_nr_caps = 0;
 341 s->s_trim_caps = 0;
342 atomic_set(&s->s_ref, 1);
343 INIT_LIST_HEAD(&s->s_waiting);
344 INIT_LIST_HEAD(&s->s_unsafe);
345 s->s_num_cap_releases = 0;
 346 s->s_iterating_caps = false;
347 INIT_LIST_HEAD(&s->s_cap_releases);
348 INIT_LIST_HEAD(&s->s_cap_releases_done);
349 INIT_LIST_HEAD(&s->s_cap_flushing);
350 INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
351
352 dout("register_session mds%d\n", mds);
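 /*
 * Grow the sessions array (indexed by mds rank) to the next power of
 * two that can hold this rank, copying over any existing entries.
 */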
353 if (mds >= mdsc->max_sessions) {
354 int newmax = 1 << get_count_order(mds+1);
355 struct ceph_mds_session **sa;
356
357 dout("register_session realloc to %d\n", newmax);
358 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
359 if (sa == NULL)
 360 goto fail_realloc;
361 if (mdsc->sessions) {
362 memcpy(sa, mdsc->sessions,
363 mdsc->max_sessions * sizeof(void *));
364 kfree(mdsc->sessions);
365 }
366 mdsc->sessions = sa;
367 mdsc->max_sessions = newmax;
368 }
369 mdsc->sessions[mds] = s;
370 atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
371
372 ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
373
 374 return s;
375
376fail_realloc:
377 kfree(s);
378 return ERR_PTR(-ENOMEM);
379}
380
381/*
382 * called under mdsc->mutex
383 */
384static void unregister_session(struct ceph_mds_client *mdsc,
385 struct ceph_mds_session *s)
2f2dc053 386{
42ce56e5
SW
387 dout("unregister_session mds%d %p\n", s->s_mds, s);
388 mdsc->sessions[s->s_mds] = NULL;
389 ceph_con_close(&s->s_con);
390 ceph_put_mds_session(s);
391}
392
393/*
394 * drop session refs in request.
395 *
396 * should be last request ref, or hold mdsc->mutex
397 */
398static void put_request_session(struct ceph_mds_request *req)
399{
400 if (req->r_session) {
401 ceph_put_mds_session(req->r_session);
402 req->r_session = NULL;
403 }
404}
405
 406void ceph_mdsc_release_request(struct kref *kref)
 407{
408 struct ceph_mds_request *req = container_of(kref,
409 struct ceph_mds_request,
410 r_kref);
411 if (req->r_request)
412 ceph_msg_put(req->r_request);
413 if (req->r_reply) {
414 ceph_msg_put(req->r_reply);
415 destroy_reply_info(&req->r_reply_info);
416 }
417 if (req->r_inode) {
418 ceph_put_cap_refs(ceph_inode(req->r_inode),
419 CEPH_CAP_PIN);
420 iput(req->r_inode);
421 }
422 if (req->r_locked_dir)
423 ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
424 CEPH_CAP_PIN);
425 if (req->r_target_inode)
426 iput(req->r_target_inode);
427 if (req->r_dentry)
428 dput(req->r_dentry);
429 if (req->r_old_dentry) {
430 ceph_put_cap_refs(
431 ceph_inode(req->r_old_dentry->d_parent->d_inode),
432 CEPH_CAP_PIN);
433 dput(req->r_old_dentry);
 434 }
435 kfree(req->r_path1);
436 kfree(req->r_path2);
437 put_request_session(req);
438 ceph_unreserve_caps(&req->r_caps_reservation);
439 kfree(req);
440}
441
442/*
 443 * lookup request, bump ref if found.
444 *
445 * called under mdsc->mutex.
446 */
447static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
448 u64 tid)
449{
450 struct ceph_mds_request *req;
451 req = radix_tree_lookup(&mdsc->request_tree, tid);
452 if (req)
453 ceph_mdsc_get_request(req);
454 return req;
455}
456
457/*
 458 * Register an in-flight request, and assign a tid. Link to directory
 459 * we are modifying (if any).
460 *
461 * Called under mdsc->mutex.
462 */
463static void __register_request(struct ceph_mds_client *mdsc,
464 struct ceph_mds_request *req,
465 struct inode *dir)
466{
467 req->r_tid = ++mdsc->last_tid;
468 if (req->r_num_caps)
469 ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
470 dout("__register_request %p tid %lld\n", req, req->r_tid);
471 ceph_mdsc_get_request(req);
472 radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);
473
474 if (dir) {
475 struct ceph_inode_info *ci = ceph_inode(dir);
476
477 spin_lock(&ci->i_unsafe_lock);
478 req->r_unsafe_dir = dir;
479 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
480 spin_unlock(&ci->i_unsafe_lock);
481 }
482}
483
484static void __unregister_request(struct ceph_mds_client *mdsc,
485 struct ceph_mds_request *req)
486{
487 dout("__unregister_request %p tid %lld\n", req, req->r_tid);
488 radix_tree_delete(&mdsc->request_tree, req->r_tid);
489 ceph_mdsc_put_request(req);
490
491 if (req->r_unsafe_dir) {
492 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
493
494 spin_lock(&ci->i_unsafe_lock);
495 list_del_init(&req->r_unsafe_dir_item);
496 spin_unlock(&ci->i_unsafe_lock);
497 }
498}
499
500/*
501 * Choose mds to send request to next. If there is a hint set in the
502 * request (e.g., due to a prior forward hint from the mds), use that.
503 * Otherwise, consult frag tree and/or caps to identify the
504 * appropriate mds. If all else fails, choose randomly.
505 *
506 * Called under mdsc->mutex.
507 */
508static int __choose_mds(struct ceph_mds_client *mdsc,
509 struct ceph_mds_request *req)
510{
511 struct inode *inode;
512 struct ceph_inode_info *ci;
513 struct ceph_cap *cap;
514 int mode = req->r_direct_mode;
515 int mds = -1;
516 u32 hash = req->r_direct_hash;
517 bool is_hash = req->r_direct_is_hash;
518
519 /*
520 * is there a specific mds we should try? ignore hint if we have
521 * no session and the mds is not up (active or recovering).
522 */
523 if (req->r_resend_mds >= 0 &&
524 (__have_session(mdsc, req->r_resend_mds) ||
525 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
526 dout("choose_mds using resend_mds mds%d\n",
527 req->r_resend_mds);
528 return req->r_resend_mds;
529 }
530
531 if (mode == USE_RANDOM_MDS)
532 goto random;
533
534 inode = NULL;
535 if (req->r_inode) {
536 inode = req->r_inode;
537 } else if (req->r_dentry) {
538 if (req->r_dentry->d_inode) {
539 inode = req->r_dentry->d_inode;
540 } else {
541 inode = req->r_dentry->d_parent->d_inode;
542 hash = req->r_dentry->d_name.hash;
543 is_hash = true;
544 }
545 }
546 dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
547 (int)hash, mode);
548 if (!inode)
549 goto random;
550 ci = ceph_inode(inode);
551
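 /*
 * If we have a dentry name hash within a directory, consult the
 * directory's fragtree: pick a random replica for USE_ANY_MDS when
 * the frag is replicated, otherwise fall back to the frag's auth mds.
 */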
552 if (is_hash && S_ISDIR(inode->i_mode)) {
553 struct ceph_inode_frag frag;
554 int found;
555
556 ceph_choose_frag(ci, hash, &frag, &found);
557 if (found) {
558 if (mode == USE_ANY_MDS && frag.ndist > 0) {
559 u8 r;
560
561 /* choose a random replica */
562 get_random_bytes(&r, 1);
563 r %= frag.ndist;
564 mds = frag.dist[r];
565 dout("choose_mds %p %llx.%llx "
566 "frag %u mds%d (%d/%d)\n",
567 inode, ceph_vinop(inode),
568 frag.frag, frag.mds,
569 (int)r, frag.ndist);
570 return mds;
571 }
572
573 /* since this file/dir wasn't known to be
574 * replicated, then we want to look for the
575 * authoritative mds. */
576 mode = USE_AUTH_MDS;
577 if (frag.mds >= 0) {
578 /* choose auth mds */
579 mds = frag.mds;
580 dout("choose_mds %p %llx.%llx "
581 "frag %u mds%d (auth)\n",
582 inode, ceph_vinop(inode), frag.frag, mds);
583 return mds;
584 }
585 }
586 }
587
588 spin_lock(&inode->i_lock);
589 cap = NULL;
590 if (mode == USE_AUTH_MDS)
591 cap = ci->i_auth_cap;
592 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
593 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
594 if (!cap) {
595 spin_unlock(&inode->i_lock);
596 goto random;
597 }
598 mds = cap->session->s_mds;
599 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
600 inode, ceph_vinop(inode), mds,
601 cap == ci->i_auth_cap ? "auth " : "", cap);
602 spin_unlock(&inode->i_lock);
603 return mds;
604
605random:
606 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
607 dout("choose_mds chose random mds%d\n", mds);
608 return mds;
609}
610
611
612/*
613 * session messages
614 */
615static struct ceph_msg *create_session_msg(u32 op, u64 seq)
616{
617 struct ceph_msg *msg;
618 struct ceph_mds_session_head *h;
619
620 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
621 if (IS_ERR(msg)) {
622 pr_err("create_session_msg ENOMEM creating msg\n");
623 return ERR_PTR(PTR_ERR(msg));
624 }
625 h = msg->front.iov_base;
626 h->op = cpu_to_le32(op);
627 h->seq = cpu_to_le64(seq);
628 return msg;
629}
630
631/*
632 * send session open request.
633 *
634 * called under mdsc->mutex
635 */
636static int __open_session(struct ceph_mds_client *mdsc,
637 struct ceph_mds_session *session)
638{
639 struct ceph_msg *msg;
640 int mstate;
641 int mds = session->s_mds;
642 int err = 0;
643
644 /* wait for mds to go active? */
645 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
646 dout("open_session to mds%d (%s)\n", mds,
647 ceph_mds_state_name(mstate));
648 session->s_state = CEPH_MDS_SESSION_OPENING;
649 session->s_renew_requested = jiffies;
650
651 /* send connect message */
652 msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
653 if (IS_ERR(msg)) {
654 err = PTR_ERR(msg);
655 goto out;
656 }
657 ceph_con_send(&session->s_con, msg);
658
659out:
 660 return err;
661}
662
663/*
664 * session caps
665 */
666
667/*
668 * Free preallocated cap messages assigned to this session
669 */
670static void cleanup_cap_releases(struct ceph_mds_session *session)
671{
672 struct ceph_msg *msg;
673
674 spin_lock(&session->s_cap_lock);
675 while (!list_empty(&session->s_cap_releases)) {
676 msg = list_first_entry(&session->s_cap_releases,
677 struct ceph_msg, list_head);
678 list_del_init(&msg->list_head);
679 ceph_msg_put(msg);
680 }
681 while (!list_empty(&session->s_cap_releases_done)) {
682 msg = list_first_entry(&session->s_cap_releases_done,
683 struct ceph_msg, list_head);
684 list_del_init(&msg->list_head);
685 ceph_msg_put(msg);
686 }
687 spin_unlock(&session->s_cap_lock);
688}
689
690/*
691 * Helper to safely iterate over all caps associated with a session.
692 *
693 * caller must hold session s_mutex
694 */
695static int iterate_session_caps(struct ceph_mds_session *session,
696 int (*cb)(struct inode *, struct ceph_cap *,
697 void *), void *arg)
698{
699 struct ceph_cap *cap, *ncap;
700 struct inode *inode;
701 int ret;
702
703 dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
704 spin_lock(&session->s_cap_lock);
 705 session->s_iterating_caps = true;
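 /*
 * Take a reference on the inode and drop s_cap_lock while invoking
 * the callback, since the callback may sleep or take other locks;
 * s_iterating_caps is set for the duration of the walk.
 */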
706 list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
707 inode = igrab(&cap->ci->vfs_inode);
708 if (!inode)
709 continue;
710 spin_unlock(&session->s_cap_lock);
711 ret = cb(inode, cap, arg);
712 iput(inode);
 713 spin_lock(&session->s_cap_lock);
714 if (ret < 0)
715 goto out;
 716 }
717 ret = 0;
718out:
719 session->s_iterating_caps = false;
 720 spin_unlock(&session->s_cap_lock);
 721 return ret;
722}
723
724static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
725 void *arg)
726{
727 struct ceph_inode_info *ci = ceph_inode(inode);
728 dout("removing cap %p, ci is %p, inode is %p\n",
729 cap, ci, &ci->vfs_inode);
730 ceph_remove_cap(cap);
731 return 0;
732}
733
734/*
735 * caller must hold session s_mutex
736 */
737static void remove_session_caps(struct ceph_mds_session *session)
738{
739 dout("remove_session_caps on %p\n", session);
740 iterate_session_caps(session, remove_session_caps_cb, NULL);
741 BUG_ON(session->s_nr_caps > 0);
742 cleanup_cap_releases(session);
743}
744
745/*
746 * wake up any threads waiting on this session's caps. if the cap is
747 * old (didn't get renewed on the client reconnect), remove it now.
748 *
749 * caller must hold s_mutex.
750 */
751static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
752 void *arg)
753{
754 struct ceph_inode_info *ci = ceph_inode(inode);
755
756 wake_up(&ci->i_cap_wq);
757 if (arg) {
758 spin_lock(&inode->i_lock);
759 ci->i_wanted_max_size = 0;
760 ci->i_requested_max_size = 0;
761 spin_unlock(&inode->i_lock);
762 }
763 return 0;
764}
765
766static void wake_up_session_caps(struct ceph_mds_session *session,
767 int reconnect)
768{
769 dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
770 iterate_session_caps(session, wake_up_session_cb,
771 (void *)(unsigned long)reconnect);
772}
773
774/*
775 * Send periodic message to MDS renewing all currently held caps. The
776 * ack will reset the expiration for all caps from this session.
777 *
778 * caller holds s_mutex
779 */
780static int send_renew_caps(struct ceph_mds_client *mdsc,
781 struct ceph_mds_session *session)
782{
783 struct ceph_msg *msg;
784 int state;
785
786 if (time_after_eq(jiffies, session->s_cap_ttl) &&
787 time_after_eq(session->s_cap_ttl, session->s_renew_requested))
788 pr_info("mds%d caps stale\n", session->s_mds);
789
790 /* do not try to renew caps until a recovering mds has reconnected
791 * with its clients. */
792 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
793 if (state < CEPH_MDS_STATE_RECONNECT) {
794 dout("send_renew_caps ignoring mds%d (%s)\n",
795 session->s_mds, ceph_mds_state_name(state));
796 return 0;
797 }
798
799 dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
800 ceph_mds_state_name(state));
801 session->s_renew_requested = jiffies;
802 msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
803 ++session->s_renew_seq);
804 if (IS_ERR(msg))
805 return PTR_ERR(msg);
806 ceph_con_send(&session->s_con, msg);
807 return 0;
808}
809
810/*
811 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
812 *
813 * Called under session->s_mutex
814 */
815static void renewed_caps(struct ceph_mds_client *mdsc,
816 struct ceph_mds_session *session, int is_renew)
817{
818 int was_stale;
819 int wake = 0;
820
821 spin_lock(&session->s_cap_lock);
822 was_stale = is_renew && (session->s_cap_ttl == 0 ||
823 time_after_eq(jiffies, session->s_cap_ttl));
824
825 session->s_cap_ttl = session->s_renew_requested +
826 mdsc->mdsmap->m_session_timeout*HZ;
827
828 if (was_stale) {
829 if (time_before(jiffies, session->s_cap_ttl)) {
830 pr_info("mds%d caps renewed\n", session->s_mds);
831 wake = 1;
832 } else {
833 pr_info("mds%d caps still stale\n", session->s_mds);
834 }
835 }
836 dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
837 session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
 838 time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
839 spin_unlock(&session->s_cap_lock);
840
841 if (wake)
 842 wake_up_session_caps(session, 0);
843}
844
845/*
846 * send a session close request
847 */
848static int request_close_session(struct ceph_mds_client *mdsc,
849 struct ceph_mds_session *session)
850{
851 struct ceph_msg *msg;
852 int err = 0;
853
854 dout("request_close_session mds%d state %s seq %lld\n",
855 session->s_mds, session_state_name(session->s_state),
856 session->s_seq);
857 msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
858 if (IS_ERR(msg))
859 err = PTR_ERR(msg);
860 else
861 ceph_con_send(&session->s_con, msg);
862 return err;
863}
864
865/*
866 * Called with s_mutex held.
867 */
868static int __close_session(struct ceph_mds_client *mdsc,
869 struct ceph_mds_session *session)
870{
871 if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
872 return 0;
873 session->s_state = CEPH_MDS_SESSION_CLOSING;
874 return request_close_session(mdsc, session);
875}
876
877/*
878 * Trim old(er) caps.
879 *
880 * Because we can't cache an inode without one or more caps, we do
881 * this indirectly: if a cap is unused, we prune its aliases, at which
 882 * point the inode will hopefully get dropped too.
883 *
884 * Yes, this is a bit sloppy. Our only real goal here is to respond to
885 * memory pressure from the MDS, though, so it needn't be perfect.
886 */
887static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
888{
889 struct ceph_mds_session *session = arg;
890 struct ceph_inode_info *ci = ceph_inode(inode);
891 int used, oissued, mine;
892
893 if (session->s_trim_caps <= 0)
894 return -1;
895
896 spin_lock(&inode->i_lock);
897 mine = cap->issued | cap->implemented;
898 used = __ceph_caps_used(ci);
899 oissued = __ceph_caps_issued_other(ci, cap);
900
901 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
902 inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
903 ceph_cap_string(used));
904 if (ci->i_dirty_caps)
905 goto out; /* dirty caps */
906 if ((used & ~oissued) & mine)
907 goto out; /* we need these caps */
908
909 session->s_trim_caps--;
910 if (oissued) {
911 /* we aren't the only cap.. just remove us */
912 __ceph_remove_cap(cap, NULL);
913 } else {
914 /* try to drop referring dentries */
915 spin_unlock(&inode->i_lock);
916 d_prune_aliases(inode);
917 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
918 inode, cap, atomic_read(&inode->i_count));
919 return 0;
920 }
921
922out:
923 spin_unlock(&inode->i_lock);
924 return 0;
925}
926
927/*
928 * Trim session cap count down to some max number.
929 */
930static int trim_caps(struct ceph_mds_client *mdsc,
931 struct ceph_mds_session *session,
932 int max_caps)
933{
934 int trim_caps = session->s_nr_caps - max_caps;
935
936 dout("trim_caps mds%d start: %d / %d, trim %d\n",
937 session->s_mds, session->s_nr_caps, max_caps, trim_caps);
938 if (trim_caps > 0) {
939 session->s_trim_caps = trim_caps;
940 iterate_session_caps(session, trim_caps_cb, session);
941 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
942 session->s_mds, session->s_nr_caps, max_caps,
943 trim_caps - session->s_trim_caps);
 944 session->s_trim_caps = 0;
945 }
946 return 0;
947}
948
949/*
950 * Allocate cap_release messages. If there is a partially full message
 951 * in the queue, try to allocate enough to cover its remainder, so that
952 * we can send it immediately.
953 *
954 * Called under s_mutex.
955 */
956static int add_cap_releases(struct ceph_mds_client *mdsc,
957 struct ceph_mds_session *session,
958 int extra)
959{
960 struct ceph_msg *msg;
961 struct ceph_mds_cap_release *head;
962 int err = -ENOMEM;
963
964 if (extra < 0)
 965 extra = mdsc->client->mount_args->cap_release_safety;
966
967 spin_lock(&session->s_cap_lock);
968
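 /*
 * Preallocate enough release messages to cover every cap in the
 * session plus 'extra' slots; any capacity still free in the head
 * message is added to 'extra' so that message can be moved to the
 * done list below and sent right away.
 */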
969 if (!list_empty(&session->s_cap_releases)) {
970 msg = list_first_entry(&session->s_cap_releases,
971 struct ceph_msg,
972 list_head);
973 head = msg->front.iov_base;
974 extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
975 }
976
977 while (session->s_num_cap_releases < session->s_nr_caps + extra) {
978 spin_unlock(&session->s_cap_lock);
979 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
980 0, 0, NULL);
981 if (!msg)
982 goto out_unlocked;
983 dout("add_cap_releases %p msg %p now %d\n", session, msg,
984 (int)msg->front.iov_len);
985 head = msg->front.iov_base;
986 head->num = cpu_to_le32(0);
987 msg->front.iov_len = sizeof(*head);
988 spin_lock(&session->s_cap_lock);
989 list_add(&msg->list_head, &session->s_cap_releases);
990 session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
991 }
992
993 if (!list_empty(&session->s_cap_releases)) {
994 msg = list_first_entry(&session->s_cap_releases,
995 struct ceph_msg,
996 list_head);
997 head = msg->front.iov_base;
998 if (head->num) {
999 dout(" queueing non-full %p (%d)\n", msg,
1000 le32_to_cpu(head->num));
1001 list_move_tail(&msg->list_head,
1002 &session->s_cap_releases_done);
1003 session->s_num_cap_releases -=
1004 CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
1005 }
1006 }
1007 err = 0;
1008 spin_unlock(&session->s_cap_lock);
1009out_unlocked:
1010 return err;
1011}
1012
1013/*
 1014 * check if all dirty inode data has been flushed to the mds.
1015 *
1016 * returns true if we've flushed through want_flush_seq
1017 */
1018static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
1019{
1020 int mds, ret = 1;
1021
1022 dout("check_cap_flush want %lld\n", want_flush_seq);
1023 mutex_lock(&mdsc->mutex);
1024 for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
1025 struct ceph_mds_session *session = mdsc->sessions[mds];
1026
1027 if (!session)
1028 continue;
1029 get_session(session);
1030 mutex_unlock(&mdsc->mutex);
1031
1032 mutex_lock(&session->s_mutex);
1033 if (!list_empty(&session->s_cap_flushing)) {
1034 struct ceph_inode_info *ci =
1035 list_entry(session->s_cap_flushing.next,
1036 struct ceph_inode_info,
1037 i_flushing_item);
1038 struct inode *inode = &ci->vfs_inode;
1039
1040 spin_lock(&inode->i_lock);
1041 if (ci->i_cap_flush_seq <= want_flush_seq) {
1042 dout("check_cap_flush still flushing %p "
1043 "seq %lld <= %lld to mds%d\n", inode,
1044 ci->i_cap_flush_seq, want_flush_seq,
1045 session->s_mds);
1046 ret = 0;
1047 }
1048 spin_unlock(&inode->i_lock);
1049 }
1050 mutex_unlock(&session->s_mutex);
1051 ceph_put_mds_session(session);
1052
1053 if (!ret)
1054 return ret;
1055 mutex_lock(&mdsc->mutex);
1056 }
1057
1058 mutex_unlock(&mdsc->mutex);
1059 dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
1060 return ret;
1061}
1062
1063/*
1064 * called under s_mutex
1065 */
1066static void send_cap_releases(struct ceph_mds_client *mdsc,
1067 struct ceph_mds_session *session)
1068{
1069 struct ceph_msg *msg;
1070
1071 dout("send_cap_releases mds%d\n", session->s_mds);
1072 while (1) {
1073 spin_lock(&session->s_cap_lock);
1074 if (list_empty(&session->s_cap_releases_done))
1075 break;
1076 msg = list_first_entry(&session->s_cap_releases_done,
1077 struct ceph_msg, list_head);
1078 list_del_init(&msg->list_head);
1079 spin_unlock(&session->s_cap_lock);
1080 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1081 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1082 ceph_con_send(&session->s_con, msg);
1083 }
1084 spin_unlock(&session->s_cap_lock);
1085}
1086
1087/*
1088 * requests
1089 */
1090
1091/*
1092 * Create an mds request.
1093 */
1094struct ceph_mds_request *
1095ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1096{
1097 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1098
1099 if (!req)
1100 return ERR_PTR(-ENOMEM);
1101
1102 req->r_started = jiffies;
1103 req->r_resend_mds = -1;
1104 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1105 req->r_fmode = -1;
 1106 kref_init(&req->r_kref);
1107 INIT_LIST_HEAD(&req->r_wait);
1108 init_completion(&req->r_completion);
1109 init_completion(&req->r_safe_completion);
1110 INIT_LIST_HEAD(&req->r_unsafe_item);
1111
1112 req->r_op = op;
1113 req->r_direct_mode = mode;
1114 return req;
1115}
1116
1117/*
1118 * return oldest (lowest) tid in request tree, 0 if none.
1119 *
1120 * called under mdsc->mutex.
1121 */
1122static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1123{
1124 struct ceph_mds_request *first;
1125 if (radix_tree_gang_lookup(&mdsc->request_tree,
1126 (void **)&first, 0, 1) <= 0)
1127 return 0;
1128 return first->r_tid;
1129}
1130
1131/*
1132 * Build a dentry's path. Allocate on heap; caller must kfree. Based
1133 * on build_path_from_dentry in fs/cifs/dir.c.
1134 *
1135 * If @stop_on_nosnap, generate path relative to the first non-snapped
1136 * inode.
1137 *
1138 * Encode hidden .snap dirs as a double /, i.e.
1139 * foo/.snap/bar -> foo//bar
1140 */
1141char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1142 int stop_on_nosnap)
1143{
1144 struct dentry *temp;
1145 char *path;
1146 int len, pos;
1147
1148 if (dentry == NULL)
1149 return ERR_PTR(-EINVAL);
1150
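 /*
 * Two passes: first walk toward the root to measure the path length,
 * then fill the buffer from the end back toward the front. If a
 * concurrent rename changes the length mid-build, start over.
 */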
1151retry:
1152 len = 0;
1153 for (temp = dentry; !IS_ROOT(temp);) {
1154 struct inode *inode = temp->d_inode;
1155 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1156 len++; /* slash only */
1157 else if (stop_on_nosnap && inode &&
1158 ceph_snap(inode) == CEPH_NOSNAP)
1159 break;
1160 else
1161 len += 1 + temp->d_name.len;
1162 temp = temp->d_parent;
1163 if (temp == NULL) {
1164 pr_err("build_path_dentry corrupt dentry %p\n", dentry);
1165 return ERR_PTR(-EINVAL);
1166 }
1167 }
1168 if (len)
1169 len--; /* no leading '/' */
1170
1171 path = kmalloc(len+1, GFP_NOFS);
1172 if (path == NULL)
1173 return ERR_PTR(-ENOMEM);
1174 pos = len;
1175 path[pos] = 0; /* trailing null */
1176 for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1177 struct inode *inode = temp->d_inode;
1178
1179 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1180 dout("build_path_dentry path+%d: %p SNAPDIR\n",
1181 pos, temp);
1182 } else if (stop_on_nosnap && inode &&
1183 ceph_snap(inode) == CEPH_NOSNAP) {
1184 break;
1185 } else {
1186 pos -= temp->d_name.len;
1187 if (pos < 0)
1188 break;
1189 strncpy(path + pos, temp->d_name.name,
1190 temp->d_name.len);
1191 dout("build_path_dentry path+%d: %p '%.*s'\n",
1192 pos, temp, temp->d_name.len, path + pos);
1193 }
1194 if (pos)
1195 path[--pos] = '/';
1196 temp = temp->d_parent;
1197 if (temp == NULL) {
1198 pr_err("build_path_dentry corrupt dentry\n");
1199 kfree(path);
1200 return ERR_PTR(-EINVAL);
1201 }
1202 }
1203 if (pos != 0) {
1204 pr_err("build_path_dentry did not end path lookup where "
1205 "expected, namelen is %d, pos is %d\n", len, pos);
1206 /* presumably this is only possible if racing with a
1207 rename of one of the parent directories (we can not
1208 lock the dentries above us to prevent this, but
1209 retrying should be harmless) */
1210 kfree(path);
1211 goto retry;
1212 }
1213
1214 *base = ceph_ino(temp->d_inode);
1215 *plen = len;
1216 dout("build_path_dentry on %p %d built %llx '%.*s'\n",
1217 dentry, atomic_read(&dentry->d_count), *base, len, path);
1218 return path;
1219}
1220
1221static int build_dentry_path(struct dentry *dentry,
1222 const char **ppath, int *ppathlen, u64 *pino,
1223 int *pfreepath)
1224{
1225 char *path;
1226
1227 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
1228 *pino = ceph_ino(dentry->d_parent->d_inode);
1229 *ppath = dentry->d_name.name;
1230 *ppathlen = dentry->d_name.len;
1231 return 0;
1232 }
1233 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1234 if (IS_ERR(path))
1235 return PTR_ERR(path);
1236 *ppath = path;
1237 *pfreepath = 1;
1238 return 0;
1239}
1240
1241static int build_inode_path(struct inode *inode,
1242 const char **ppath, int *ppathlen, u64 *pino,
1243 int *pfreepath)
1244{
1245 struct dentry *dentry;
1246 char *path;
1247
1248 if (ceph_snap(inode) == CEPH_NOSNAP) {
1249 *pino = ceph_ino(inode);
1250 *ppathlen = 0;
1251 return 0;
1252 }
1253 dentry = d_find_alias(inode);
1254 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1255 dput(dentry);
1256 if (IS_ERR(path))
1257 return PTR_ERR(path);
1258 *ppath = path;
1259 *pfreepath = 1;
1260 return 0;
1261}
1262
1263/*
1264 * request arguments may be specified via an inode *, a dentry *, or
1265 * an explicit ino+path.
1266 */
1267static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1268 const char *rpath, u64 rino,
1269 const char **ppath, int *pathlen,
1270 u64 *ino, int *freepath)
1271{
1272 int r = 0;
1273
1274 if (rinode) {
1275 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1276 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1277 ceph_snap(rinode));
1278 } else if (rdentry) {
1279 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1280 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1281 *ppath);
1282 } else if (rpath) {
1283 *ino = rino;
1284 *ppath = rpath;
1285 *pathlen = strlen(rpath);
1286 dout(" path %.*s\n", *pathlen, rpath);
1287 }
1288
1289 return r;
1290}
1291
1292/*
1293 * called under mdsc->mutex
1294 */
1295static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1296 struct ceph_mds_request *req,
1297 int mds)
1298{
1299 struct ceph_msg *msg;
1300 struct ceph_mds_request_head *head;
1301 const char *path1 = NULL;
1302 const char *path2 = NULL;
1303 u64 ino1 = 0, ino2 = 0;
1304 int pathlen1 = 0, pathlen2 = 0;
1305 int freepath1 = 0, freepath2 = 0;
1306 int len;
1307 u16 releases;
1308 void *p, *end;
1309 int ret;
1310
1311 ret = set_request_path_attr(req->r_inode, req->r_dentry,
1312 req->r_path1, req->r_ino1.ino,
1313 &path1, &pathlen1, &ino1, &freepath1);
1314 if (ret < 0) {
1315 msg = ERR_PTR(ret);
1316 goto out;
1317 }
1318
1319 ret = set_request_path_attr(NULL, req->r_old_dentry,
1320 req->r_path2, req->r_ino2.ino,
1321 &path2, &pathlen2, &ino2, &freepath2);
1322 if (ret < 0) {
1323 msg = ERR_PTR(ret);
1324 goto out_free1;
1325 }
1326
1327 len = sizeof(*head) +
1328 pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));
1329
1330 /* calculate (max) length for cap releases */
1331 len += sizeof(struct ceph_mds_request_release) *
1332 (!!req->r_inode_drop + !!req->r_dentry_drop +
1333 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1334 if (req->r_dentry_drop)
1335 len += req->r_dentry->d_name.len;
1336 if (req->r_old_dentry_drop)
1337 len += req->r_old_dentry->d_name.len;
1338
1339 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
1340 if (IS_ERR(msg))
1341 goto out_free2;
1342
1343 msg->hdr.tid = cpu_to_le64(req->r_tid);
1344
1345 head = msg->front.iov_base;
1346 p = msg->front.iov_base + sizeof(*head);
1347 end = msg->front.iov_base + msg->front.iov_len;
1348
1349 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1350 head->op = cpu_to_le32(req->r_op);
1351 head->caller_uid = cpu_to_le32(current_fsuid());
1352 head->caller_gid = cpu_to_le32(current_fsgid());
1353 head->args = req->r_args;
1354
1355 ceph_encode_filepath(&p, end, ino1, path1);
1356 ceph_encode_filepath(&p, end, ino2, path2);
1357
1358 /* cap releases */
1359 releases = 0;
1360 if (req->r_inode_drop)
1361 releases += ceph_encode_inode_release(&p,
1362 req->r_inode ? req->r_inode : req->r_dentry->d_inode,
1363 mds, req->r_inode_drop, req->r_inode_unless, 0);
1364 if (req->r_dentry_drop)
1365 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1366 mds, req->r_dentry_drop, req->r_dentry_unless);
1367 if (req->r_old_dentry_drop)
1368 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1369 mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1370 if (req->r_old_inode_drop)
1371 releases += ceph_encode_inode_release(&p,
1372 req->r_old_dentry->d_inode,
1373 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1374 head->num_releases = cpu_to_le16(releases);
1375
1376 BUG_ON(p > end);
1377 msg->front.iov_len = p - msg->front.iov_base;
1378 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1379
1380 msg->pages = req->r_pages;
1381 msg->nr_pages = req->r_num_pages;
1382 msg->hdr.data_len = cpu_to_le32(req->r_data_len);
1383 msg->hdr.data_off = cpu_to_le16(0);
1384
1385out_free2:
1386 if (freepath2)
1387 kfree((char *)path2);
1388out_free1:
1389 if (freepath1)
1390 kfree((char *)path1);
1391out:
1392 return msg;
1393}
1394
1395/*
1396 * called under mdsc->mutex if error, under no mutex if
1397 * success.
1398 */
1399static void complete_request(struct ceph_mds_client *mdsc,
1400 struct ceph_mds_request *req)
1401{
1402 if (req->r_callback)
1403 req->r_callback(mdsc, req);
1404 else
1405 complete(&req->r_completion);
1406}
1407
1408/*
1409 * called under mdsc->mutex
1410 */
1411static int __prepare_send_request(struct ceph_mds_client *mdsc,
1412 struct ceph_mds_request *req,
1413 int mds)
1414{
1415 struct ceph_mds_request_head *rhead;
1416 struct ceph_msg *msg;
1417 int flags = 0;
1418
1419 req->r_mds = mds;
1420 req->r_attempts++;
1421 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
1422 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
1423
1424 if (req->r_request) {
1425 ceph_msg_put(req->r_request);
1426 req->r_request = NULL;
1427 }
1428 msg = create_request_message(mdsc, req, mds);
1429 if (IS_ERR(msg)) {
1430 req->r_reply = ERR_PTR(PTR_ERR(msg));
1431 complete_request(mdsc, req);
1432 return -PTR_ERR(msg);
1433 }
1434 req->r_request = msg;
1435
1436 rhead = msg->front.iov_base;
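 /*
 * Every (re)send carries the oldest tid this client still has
 * outstanding; a request that already received an unsafe reply is
 * resent with the REPLAY flag set.
 */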
1437 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
1438 if (req->r_got_unsafe)
1439 flags |= CEPH_MDS_FLAG_REPLAY;
1440 if (req->r_locked_dir)
1441 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
1442 rhead->flags = cpu_to_le32(flags);
1443 rhead->num_fwd = req->r_num_fwd;
1444 rhead->num_retry = req->r_attempts - 1;
1445
1446 dout(" r_locked_dir = %p\n", req->r_locked_dir);
1447
1448 if (req->r_target_inode && req->r_got_unsafe)
1449 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
1450 else
1451 rhead->ino = 0;
1452 return 0;
1453}
1454
1455/*
1456 * send request, or put it on the appropriate wait list.
1457 */
1458static int __do_request(struct ceph_mds_client *mdsc,
1459 struct ceph_mds_request *req)
1460{
1461 struct ceph_mds_session *session = NULL;
1462 int mds = -1;
1463 int err = -EAGAIN;
1464
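 /*
 * If no usable mds is known, the request is parked on
 * mdsc->waiting_for_map; if the session is not open yet, it is parked
 * on the session's s_waiting list. Parked requests are re-driven
 * later through __wake_requests().
 */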
1465 if (req->r_reply)
1466 goto out;
1467
1468 if (req->r_timeout &&
1469 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
1470 dout("do_request timed out\n");
1471 err = -EIO;
1472 goto finish;
1473 }
1474
1475 mds = __choose_mds(mdsc, req);
1476 if (mds < 0 ||
1477 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
1478 dout("do_request no mds or not active, waiting for map\n");
1479 list_add(&req->r_wait, &mdsc->waiting_for_map);
1480 goto out;
1481 }
1482
1483 /* get, open session */
1484 session = __ceph_lookup_mds_session(mdsc, mds);
1485 if (!session)
1486 session = register_session(mdsc, mds);
1487 dout("do_request mds%d session %p state %s\n", mds, session,
1488 session_state_name(session->s_state));
1489 if (session->s_state != CEPH_MDS_SESSION_OPEN &&
1490 session->s_state != CEPH_MDS_SESSION_HUNG) {
1491 if (session->s_state == CEPH_MDS_SESSION_NEW ||
1492 session->s_state == CEPH_MDS_SESSION_CLOSING)
1493 __open_session(mdsc, session);
1494 list_add(&req->r_wait, &session->s_waiting);
1495 goto out_session;
1496 }
1497
1498 /* send request */
1499 req->r_session = get_session(session);
1500 req->r_resend_mds = -1; /* forget any previous mds hint */
1501
1502 if (req->r_request_started == 0) /* note request start time */
1503 req->r_request_started = jiffies;
1504
1505 err = __prepare_send_request(mdsc, req, mds);
1506 if (!err) {
1507 ceph_msg_get(req->r_request);
1508 ceph_con_send(&session->s_con, req->r_request);
1509 }
1510
1511out_session:
1512 ceph_put_mds_session(session);
1513out:
1514 return err;
1515
1516finish:
1517 req->r_reply = ERR_PTR(err);
1518 complete_request(mdsc, req);
1519 goto out;
1520}
1521
1522/*
1523 * called under mdsc->mutex
1524 */
1525static void __wake_requests(struct ceph_mds_client *mdsc,
1526 struct list_head *head)
1527{
1528 struct ceph_mds_request *req, *nreq;
1529
1530 list_for_each_entry_safe(req, nreq, head, r_wait) {
1531 list_del_init(&req->r_wait);
1532 __do_request(mdsc, req);
1533 }
1534}
1535
1536/*
1537 * Wake up threads with requests pending for @mds, so that they can
1538 * resubmit their requests to a possibly different mds. If @all is set,
 1539 * wake up if their requests have been forwarded to @mds, too.
1540 */
1541static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
1542{
1543 struct ceph_mds_request *reqs[10];
1544 u64 nexttid = 0;
1545 int i, got;
1546
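 /*
 * Walk the request tree in tid order, ten entries at a time, and
 * re-run any request that was sent to the given mds (skipping those
 * that already got an unsafe reply).
 */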
1547 dout("kick_requests mds%d\n", mds);
1548 while (nexttid <= mdsc->last_tid) {
1549 got = radix_tree_gang_lookup(&mdsc->request_tree,
1550 (void **)&reqs, nexttid, 10);
1551 if (got == 0)
1552 break;
1553 nexttid = reqs[got-1]->r_tid + 1;
1554 for (i = 0; i < got; i++) {
1555 if (reqs[i]->r_got_unsafe)
1556 continue;
1557 if (reqs[i]->r_session &&
1558 reqs[i]->r_session->s_mds == mds) {
1559 dout(" kicking tid %llu\n", reqs[i]->r_tid);
1560 put_request_session(reqs[i]);
1561 __do_request(mdsc, reqs[i]);
1562 }
1563 }
1564 }
1565}
1566
1567void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
1568 struct ceph_mds_request *req)
1569{
1570 dout("submit_request on %p\n", req);
1571 mutex_lock(&mdsc->mutex);
1572 __register_request(mdsc, req, NULL);
1573 __do_request(mdsc, req);
1574 mutex_unlock(&mdsc->mutex);
1575}
1576
1577/*
 1578 * Synchronously perform an mds request. Take care of all of the
1579 * session setup, forwarding, retry details.
1580 */
1581int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
1582 struct inode *dir,
1583 struct ceph_mds_request *req)
1584{
1585 int err;
1586
1587 dout("do_request on %p\n", req);
1588
1589 /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
1590 if (req->r_inode)
1591 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
1592 if (req->r_locked_dir)
1593 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
1594 if (req->r_old_dentry)
1595 ceph_get_cap_refs(
1596 ceph_inode(req->r_old_dentry->d_parent->d_inode),
1597 CEPH_CAP_PIN);
1598
1599 /* issue */
1600 mutex_lock(&mdsc->mutex);
1601 __register_request(mdsc, req, dir);
1602 __do_request(mdsc, req);
1603
1604 /* wait */
1605 if (!req->r_reply) {
1606 mutex_unlock(&mdsc->mutex);
1607 if (req->r_timeout) {
1608 err = (long)wait_for_completion_interruptible_timeout(
1609 &req->r_completion, req->r_timeout);
1610 if (err == 0)
 1611 req->r_reply = ERR_PTR(-EIO);
1612 else if (err < 0)
1613 req->r_reply = ERR_PTR(err);
 1614 } else {
1615 err = wait_for_completion_interruptible(
1616 &req->r_completion);
1617 if (err)
1618 req->r_reply = ERR_PTR(err);
1619 }
1620 mutex_lock(&mdsc->mutex);
1621 }
1622
1623 if (IS_ERR(req->r_reply)) {
1624 err = PTR_ERR(req->r_reply);
1625 req->r_reply = NULL;
1626
1627 /* clean up */
1628 __unregister_request(mdsc, req);
1629 if (!list_empty(&req->r_unsafe_item))
1630 list_del_init(&req->r_unsafe_item);
1631 complete(&req->r_safe_completion);
1632 } else if (req->r_err) {
1633 err = req->r_err;
1634 } else {
1635 err = le32_to_cpu(req->r_reply_info.head->result);
1636 }
1637 mutex_unlock(&mdsc->mutex);
1638
1639 dout("do_request %p done, result %d\n", req, err);
1640 return err;
1641}
1642
1643/*
1644 * Handle mds reply.
1645 *
1646 * We take the session mutex and parse and process the reply immediately.
1647 * This preserves the logical ordering of replies, capabilities, etc., sent
1648 * by the MDS as they are applied to our local cache.
1649 */
1650static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
1651{
1652 struct ceph_mds_client *mdsc = session->s_mdsc;
1653 struct ceph_mds_request *req;
1654 struct ceph_mds_reply_head *head = msg->front.iov_base;
1655 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
1656 u64 tid;
1657 int err, result;
1658 int mds;
1659
1660 if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
1661 return;
1662 if (msg->front.iov_len < sizeof(*head)) {
1663 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
 1664 ceph_msg_dump(msg);
1665 return;
1666 }
1667
1668 /* get request, session */
 1669 tid = le64_to_cpu(msg->hdr.tid);
1670 mutex_lock(&mdsc->mutex);
1671 req = __lookup_request(mdsc, tid);
1672 if (!req) {
1673 dout("handle_reply on unknown tid %llu\n", tid);
1674 mutex_unlock(&mdsc->mutex);
1675 return;
1676 }
1677 dout("handle_reply %p\n", req);
1678 mds = le64_to_cpu(msg->hdr.src.name.num);
1679
1680 /* correct session? */
 1681 if (req->r_session != session) {
1682 pr_err("mdsc_handle_reply got %llu on session mds%d"
1683 " not mds%d\n", tid, session->s_mds,
1684 req->r_session ? req->r_session->s_mds : -1);
1685 mutex_unlock(&mdsc->mutex);
1686 goto out;
1687 }
1688
1689 /* dup? */
1690 if ((req->r_got_unsafe && !head->safe) ||
1691 (req->r_got_safe && head->safe)) {
1692 pr_warning("got a dup %s reply on %llu from mds%d\n",
1693 head->safe ? "safe" : "unsafe", tid, mds);
1694 mutex_unlock(&mdsc->mutex);
1695 goto out;
1696 }
1697
1698 result = le32_to_cpu(head->result);
1699
1700 /*
1701 * Tolerate 2 consecutive ESTALEs from the same mds.
1702 * FIXME: we should be looking at the cap migrate_seq.
1703 */
1704 if (result == -ESTALE) {
1705 req->r_direct_mode = USE_AUTH_MDS;
1706 req->r_num_stale++;
1707 if (req->r_num_stale <= 2) {
1708 __do_request(mdsc, req);
1709 mutex_unlock(&mdsc->mutex);
1710 goto out;
1711 }
1712 } else {
1713 req->r_num_stale = 0;
1714 }
1715
1716 if (head->safe) {
1717 req->r_got_safe = true;
1718 __unregister_request(mdsc, req);
1719 complete(&req->r_safe_completion);
1720
1721 if (req->r_got_unsafe) {
1722 /*
1723 * We already handled the unsafe response, now do the
1724 * cleanup. No need to examine the response; the MDS
1725 * doesn't include any result info in the safe
1726 * response. And even if it did, there is nothing
1727 * useful we could do with a revised return value.
1728 */
1729 dout("got safe reply %llu, mds%d\n", tid, mds);
1730 list_del_init(&req->r_unsafe_item);
1731
1732 /* last unsafe request during umount? */
1733 if (mdsc->stopping && !__get_oldest_tid(mdsc))
1734 complete(&mdsc->safe_umount_waiters);
1735 mutex_unlock(&mdsc->mutex);
1736 goto out;
1737 }
1738 }
1739
1740 BUG_ON(req->r_reply);
1741
1742 if (!head->safe) {
1743 req->r_got_unsafe = true;
1744 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
1745 }
1746
1747 dout("handle_reply tid %lld result %d\n", tid, result);
1748 rinfo = &req->r_reply_info;
1749 err = parse_reply_info(msg, rinfo);
1750 mutex_unlock(&mdsc->mutex);
1751
1752 mutex_lock(&session->s_mutex);
1753 if (err < 0) {
1754 pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
 1755 ceph_msg_dump(msg);
1756 goto out_err;
1757 }
1758
1759 /* snap trace */
1760 if (rinfo->snapblob_len) {
1761 down_write(&mdsc->snap_rwsem);
1762 ceph_update_snap_trace(mdsc, rinfo->snapblob,
1763 rinfo->snapblob + rinfo->snapblob_len,
1764 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
1765 downgrade_write(&mdsc->snap_rwsem);
1766 } else {
1767 down_read(&mdsc->snap_rwsem);
1768 }
1769
1770 /* insert trace into our cache */
1771 err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
1772 if (err == 0) {
1773 if (result == 0 && rinfo->dir_nr)
1774 ceph_readdir_prepopulate(req, req->r_session);
1775 ceph_unreserve_caps(&req->r_caps_reservation);
1776 }
1777
1778 up_read(&mdsc->snap_rwsem);
1779out_err:
1780 if (err) {
1781 req->r_err = err;
1782 } else {
1783 req->r_reply = msg;
1784 ceph_msg_get(msg);
1785 }
1786
1787 add_cap_releases(mdsc, req->r_session, -1);
1788 mutex_unlock(&session->s_mutex);
1789
1790 /* kick calling process */
1791 complete_request(mdsc, req);
1792out:
1793 ceph_mdsc_put_request(req);
1794 return;
1795}
1796
1797
1798
1799/*
1800 * handle mds notification that our request has been forwarded.
1801 */
1802static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
1803{
1804 struct ceph_mds_request *req;
1805 u64 tid;
1806 u32 next_mds;
1807 u32 fwd_seq;
1808 u8 must_resend;
1809 int err = -EINVAL;
1810 void *p = msg->front.iov_base;
1811 void *end = p + msg->front.iov_len;
1812 int from_mds, state;
1813
1814 if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
1815 goto bad;
1816 from_mds = le64_to_cpu(msg->hdr.src.name.num);
1817
1818 ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
1819 tid = ceph_decode_64(&p);
1820 next_mds = ceph_decode_32(&p);
1821 fwd_seq = ceph_decode_32(&p);
1822 must_resend = ceph_decode_8(&p);
1823
1824 WARN_ON(must_resend); /* shouldn't happen. */
1825
1826 mutex_lock(&mdsc->mutex);
1827 req = __lookup_request(mdsc, tid);
1828 if (!req) {
1829 dout("forward %llu dne\n", tid);
1830 goto out; /* dup reply? */
1831 }
1832
1833 state = mdsc->sessions[next_mds]->s_state;
1834 if (fwd_seq <= req->r_num_fwd) {
1835 dout("forward %llu to mds%d - old seq %d <= %d\n",
1836 tid, next_mds, req->r_num_fwd, fwd_seq);
1837 } else {
1838 /* resend. forward race not possible; mds would drop */
1839 dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
1840 req->r_num_fwd = fwd_seq;
1841 req->r_resend_mds = next_mds;
1842 put_request_session(req);
1843 __do_request(mdsc, req);
1844 }
1845 ceph_mdsc_put_request(req);
1846out:
1847 mutex_unlock(&mdsc->mutex);
1848 return;
1849
1850bad:
1851 pr_err("mdsc_handle_forward decode error err=%d\n", err);
1852}
1853
1854/*
1855 * handle a mds session control message
1856 */
1857static void handle_session(struct ceph_mds_session *session,
1858 struct ceph_msg *msg)
1859{
1860 struct ceph_mds_client *mdsc = session->s_mdsc;
1861 u32 op;
1862 u64 seq;
1863 int mds;
1864 struct ceph_mds_session_head *h = msg->front.iov_base;
1865 int wake = 0;
1866
1867 if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
1868 return;
1869 mds = le64_to_cpu(msg->hdr.src.name.num);
1870
1871 /* decode */
1872 if (msg->front.iov_len != sizeof(*h))
1873 goto bad;
1874 op = le32_to_cpu(h->op);
1875 seq = le64_to_cpu(h->seq);
1876
1877 mutex_lock(&mdsc->mutex);
1878 /* FIXME: this ttl calculation is generous */
1879 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
1880 mutex_unlock(&mdsc->mutex);
1881
1882 mutex_lock(&session->s_mutex);
1883
1884 dout("handle_session mds%d %s %p state %s seq %llu\n",
1885 mds, ceph_session_op_name(op), session,
1886 session_state_name(session->s_state), seq);
1887
1888 if (session->s_state == CEPH_MDS_SESSION_HUNG) {
1889 session->s_state = CEPH_MDS_SESSION_OPEN;
1890 pr_info("mds%d came back\n", session->s_mds);
1891 }
1892
1893 switch (op) {
1894 case CEPH_SESSION_OPEN:
1895 session->s_state = CEPH_MDS_SESSION_OPEN;
1896 renewed_caps(mdsc, session, 0);
1897 wake = 1;
1898 if (mdsc->stopping)
1899 __close_session(mdsc, session);
1900 break;
1901
1902 case CEPH_SESSION_RENEWCAPS:
1903 if (session->s_renew_seq == seq)
1904 renewed_caps(mdsc, session, 1);
1905 break;
1906
1907 case CEPH_SESSION_CLOSE:
 1908 unregister_session(mdsc, session);
1909 remove_session_caps(session);
1910 wake = 1; /* for good measure */
1911 complete(&mdsc->session_close_waiters);
1912 kick_requests(mdsc, mds, 0); /* cur only */
1913 break;
1914
1915 case CEPH_SESSION_STALE:
1916 pr_info("mds%d caps went stale, renewing\n",
1917 session->s_mds);
1918 spin_lock(&session->s_cap_lock);
1919 session->s_cap_gen++;
1920 session->s_cap_ttl = 0;
1921 spin_unlock(&session->s_cap_lock);
1922 send_renew_caps(mdsc, session);
1923 break;
1924
1925 case CEPH_SESSION_RECALL_STATE:
1926 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
1927 break;
1928
1929 default:
1930 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
1931 WARN_ON(1);
1932 }
1933
1934 mutex_unlock(&session->s_mutex);
1935 if (wake) {
1936 mutex_lock(&mdsc->mutex);
1937 __wake_requests(mdsc, &session->s_waiting);
1938 mutex_unlock(&mdsc->mutex);
1939 }
1940 return;
1941
1942bad:
1943 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
1944 (int)msg->front.iov_len);
 1945 ceph_msg_dump(msg);
1946 return;
1947}
1948
1949
1950/*
1951 * called under session->mutex.
1952 */
1953static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
1954 struct ceph_mds_session *session)
1955{
1956 struct ceph_mds_request *req, *nreq;
1957 int err;
1958
1959 dout("replay_unsafe_requests mds%d\n", session->s_mds);
1960
1961 mutex_lock(&mdsc->mutex);
1962 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
1963 err = __prepare_send_request(mdsc, req, session->s_mds);
1964 if (!err) {
1965 ceph_msg_get(req->r_request);
1966 ceph_con_send(&session->s_con, req->r_request);
1967 }
1968 }
1969 mutex_unlock(&mdsc->mutex);
1970}
1971
1972/*
1973 * Encode information about a cap for a reconnect with the MDS.
1974 */
1975static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
1976 void *arg)
1977{
 1978 struct ceph_mds_cap_reconnect rec;
 1979 struct ceph_inode_info *ci;
 1980 struct ceph_pagelist *pagelist = arg;
1981 char *path;
1982 int pathlen, err;
1983 u64 pathbase;
1984 struct dentry *dentry;
1985
1986 ci = cap->ci;
1987
1988 dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
1989 inode, ceph_vinop(inode), cap, cap->cap_id,
1990 ceph_cap_string(cap->issued));
93cea5be
SW
1991 err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
1992 if (err)
1993 return err;
2f2dc053
SW
1994
1995 dentry = d_find_alias(inode);
1996 if (dentry) {
1997 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
1998 if (IS_ERR(path)) {
1999 err = PTR_ERR(path);
2000 BUG_ON(err);
2001 }
2002 } else {
2003 path = NULL;
2004 pathlen = 0;
2005 }
93cea5be
SW
2006 err = ceph_pagelist_encode_string(pagelist, path, pathlen);
2007 if (err)
2008 goto out;
2f2dc053 2009
2f2dc053
SW
2010 spin_lock(&inode->i_lock);
2011 cap->seq = 0; /* reset cap seq */
2012 cap->issue_seq = 0; /* and issue_seq */
93cea5be
SW
2013 rec.cap_id = cpu_to_le64(cap->cap_id);
2014 rec.pathbase = cpu_to_le64(pathbase);
2015 rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2016 rec.issued = cpu_to_le32(cap->issued);
2017 rec.size = cpu_to_le64(inode->i_size);
2018 ceph_encode_timespec(&rec.mtime, &inode->i_mtime);
2019 ceph_encode_timespec(&rec.atime, &inode->i_atime);
2020 rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2f2dc053
SW
2021 spin_unlock(&inode->i_lock);
2022
93cea5be
SW
2023 err = ceph_pagelist_append(pagelist, &rec, sizeof(rec));
2024
2025out:
2f2dc053
SW
2026 kfree(path);
2027 dput(dentry);
93cea5be 2028 return err;
2f2dc053
SW
2029}
2030
2031
2032/*
2033 * If an MDS fails and recovers, clients need to reconnect in order to
2034 * reestablish shared state. This includes all caps issued through
2035 * this session _and_ the snap_realm hierarchy. Because it's not
2036 * clear which snap realms the mds cares about, we send everything we
2037 * know about; that ensures we'll then get any new info the
2038 * recovering MDS might have.
2039 *
2040 * This is a relatively heavyweight operation, but it's rare.
2041 *
2042 * called with mdsc->mutex held.
2043 */
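/*
 * Rough sketch of the reconnect payload this function builds (derived
 * from the encoding below, not an authoritative wire format spec):
 *
 *   u32 nr_caps
 *   nr_caps x { u64 ino, string path, struct ceph_mds_cap_reconnect }
 *   one struct ceph_mds_snaprealm_reconnect per snap realm
 */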
2044static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
2045{
93cea5be 2046 struct ceph_mds_session *session = NULL;
2f2dc053 2047 struct ceph_msg *reply;
2f2dc053 2048 int err;
2f2dc053
SW
2049 int got;
2050 u64 next_snap_ino = 0;
93cea5be 2051 struct ceph_pagelist *pagelist;
2f2dc053
SW
2052
2053 pr_info("reconnect to recovering mds%d\n", mds);
2054
93cea5be
SW
2055 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2056 if (!pagelist)
2057 goto fail_nopagelist;
2058 ceph_pagelist_init(pagelist);
2059
2060 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
2061 if (IS_ERR(reply)) {
2062 err = PTR_ERR(reply);
2063 goto fail_nomsg;
2064 }
2065
2f2dc053
SW
2066 /* find session */
2067 session = __ceph_lookup_mds_session(mdsc, mds);
2068 mutex_unlock(&mdsc->mutex); /* drop lock for duration */
2069
2070 if (session) {
2071 mutex_lock(&session->s_mutex);
2072
2073 session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2074 session->s_seq = 0;
2075
2076 ceph_con_open(&session->s_con,
2077 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2078
2079 /* replay unsafe requests */
2080 replay_unsafe_requests(mdsc, session);
2f2dc053
SW
2081 } else {
2082 dout("no session for mds%d, will send short reconnect\n",
2083 mds);
2084 }
2085
2086 down_read(&mdsc->snap_rwsem);
2087
93cea5be 2088 if (!session)
2f2dc053 2089 goto send;
2f2dc053
SW
2090 dout("session %p state %s\n", session,
2091 session_state_name(session->s_state));
2092
2093 /* traverse this session's caps */
93cea5be
SW
2094 err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
2095 if (err)
2096 goto fail;
2097 err = iterate_session_caps(session, encode_caps_cb, pagelist);
2f2dc053
SW
2098 if (err < 0)
2099 goto out;
2f2dc053
SW
2100
2101 /*
2102 * Snaprealms: we provide the mds with the ino, seq (version), and
2103 * parent for all of our realms. If the mds has any newer info,
2104 * it will tell us.
2105 */
2106 next_snap_ino = 0;
2f2dc053
SW
2107 while (1) {
2108 struct ceph_snap_realm *realm;
93cea5be 2109 struct ceph_mds_snaprealm_reconnect sr_rec;
2f2dc053
SW
2110 got = radix_tree_gang_lookup(&mdsc->snap_realms,
2111 (void **)&realm, next_snap_ino, 1);
2112 if (!got)
2113 break;
2114
2115 dout(" adding snap realm %llx seq %lld parent %llx\n",
2116 realm->ino, realm->seq, realm->parent_ino);
93cea5be
SW
2117 sr_rec.ino = cpu_to_le64(realm->ino);
2118 sr_rec.seq = cpu_to_le64(realm->seq);
2119 sr_rec.parent = cpu_to_le64(realm->parent_ino);
2120 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
2121 if (err)
2122 goto fail;
2f2dc053
SW
2123 next_snap_ino = realm->ino + 1;
2124 }
2f2dc053
SW
2125
2126send:
93cea5be
SW
2127 reply->pagelist = pagelist;
2128 reply->hdr.data_len = cpu_to_le32(pagelist->length);
2129 reply->nr_pages = calc_pages_for(0, pagelist->length);
2f2dc053
SW
2130 ceph_con_send(&session->s_con, reply);
2131
2132 if (session) {
2133 session->s_state = CEPH_MDS_SESSION_OPEN;
2134 __wake_requests(mdsc, &session->s_waiting);
2135 }
2136
2137out:
2138 up_read(&mdsc->snap_rwsem);
2139 if (session) {
2140 mutex_unlock(&session->s_mutex);
2141 ceph_put_mds_session(session);
2142 }
2143 mutex_lock(&mdsc->mutex);
2144 return;
2145
93cea5be 2146fail:
2f2dc053 2147 ceph_msg_put(reply);
93cea5be
SW
2148fail_nomsg:
2149 ceph_pagelist_release(pagelist);
2150 kfree(pagelist);
2151fail_nopagelist:
2152 pr_err("ENOMEM preparing reconnect for mds%d\n", mds);
2153 goto out;
2f2dc053
SW
2154}
2155
2156
2157/*
2158 * compare old and new mdsmaps, kicking requests
2159 * and closing out old connections as necessary
2160 *
2161 * called under mdsc->mutex.
2162 */
2163static void check_new_map(struct ceph_mds_client *mdsc,
2164 struct ceph_mdsmap *newmap,
2165 struct ceph_mdsmap *oldmap)
2166{
2167 int i;
2168 int oldstate, newstate;
2169 struct ceph_mds_session *s;
2170
2171 dout("check_new_map new %u old %u\n",
2172 newmap->m_epoch, oldmap->m_epoch);
2173
2174 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
2175 if (mdsc->sessions[i] == NULL)
2176 continue;
2177 s = mdsc->sessions[i];
2178 oldstate = ceph_mdsmap_get_state(oldmap, i);
2179 newstate = ceph_mdsmap_get_state(newmap, i);
2180
2181 dout("check_new_map mds%d state %s -> %s (session %s)\n",
2182 i, ceph_mds_state_name(oldstate),
2183 ceph_mds_state_name(newstate),
2184 session_state_name(s->s_state));
2185
2186 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
2187 ceph_mdsmap_get_addr(newmap, i),
2188 sizeof(struct ceph_entity_addr))) {
2189 if (s->s_state == CEPH_MDS_SESSION_OPENING) {
2190 /* the session never opened, just close it
2191 * out now */
2192 __wake_requests(mdsc, &s->s_waiting);
42ce56e5 2193 unregister_session(mdsc, s);
2f2dc053
SW
2194 } else {
2195 /* just close it */
2196 mutex_unlock(&mdsc->mutex);
2197 mutex_lock(&s->s_mutex);
2198 mutex_lock(&mdsc->mutex);
2199 ceph_con_close(&s->s_con);
2200 mutex_unlock(&s->s_mutex);
2201 s->s_state = CEPH_MDS_SESSION_RESTARTING;
2202 }
2203
2204 /* kick any requests waiting on the recovering mds */
2205 kick_requests(mdsc, i, 1);
2206 } else if (oldstate == newstate) {
2207 continue; /* nothing new with this mds */
2208 }
2209
2210 /*
2211 * send reconnect?
2212 */
2213 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
2214 newstate >= CEPH_MDS_STATE_RECONNECT)
2215 send_mds_reconnect(mdsc, i);
2216
2217 /*
2218 * kick requests on any mds that has gone active.
2219 *
2220 * kick requests on cur or forwarder: we may have sent
2221 * the request to mds1, mds1 told us it forwarded it
2222 * to mds2, but then we learn mds1 failed and can't be
2223 * sure it successfully forwarded our request before
2224 * it died.
2225 */
2226 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
2227 newstate >= CEPH_MDS_STATE_ACTIVE) {
fef320ff 2228 pr_info("mds%d reconnect completed\n", s->s_mds);
2f2dc053
SW
2229 kick_requests(mdsc, i, 1);
2230 ceph_kick_flushing_caps(mdsc, s);
0dc2570f 2231 wake_up_session_caps(s, 1);
2f2dc053
SW
2232 }
2233 }
2234}
2235
2236
2237
2238/*
2239 * leases
2240 */
2241
2242/*
2243 * caller must hold session s_mutex, dentry->d_lock
2244 */
2245void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
2246{
2247 struct ceph_dentry_info *di = ceph_dentry(dentry);
2248
2249 ceph_put_mds_session(di->lease_session);
2250 di->lease_session = NULL;
2251}
2252
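/*
 * Handle a dentry lease message from the mds: look up the dentry via
 * its parent inode and name, then revoke or renew the lease.  Revokes
 * are acked by reusing (and resending) the incoming message.
 */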
2253static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
2254{
2255 struct super_block *sb = mdsc->client->sb;
2256 struct inode *inode;
2257 struct ceph_mds_session *session;
2258 struct ceph_inode_info *ci;
2259 struct dentry *parent, *dentry;
2260 struct ceph_dentry_info *di;
2261 int mds;
2262 struct ceph_mds_lease *h = msg->front.iov_base;
2263 struct ceph_vino vino;
2264 int mask;
2265 struct qstr dname;
2266 int release = 0;
2267
2268 if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
2269 return;
2270 mds = le64_to_cpu(msg->hdr.src.name.num);
2271 dout("handle_lease from mds%d\n", mds);
2272
2273 /* decode */
2274 if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
2275 goto bad;
2276 vino.ino = le64_to_cpu(h->ino);
2277 vino.snap = CEPH_NOSNAP;
2278 mask = le16_to_cpu(h->mask);
2279 dname.name = (void *)h + sizeof(*h) + sizeof(u32);
2280 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
2281 if (dname.len != get_unaligned_le32(h+1))
2282 goto bad;
2283
2284 /* find session */
2285 mutex_lock(&mdsc->mutex);
2286 session = __ceph_lookup_mds_session(mdsc, mds);
2287 mutex_unlock(&mdsc->mutex);
2288 if (!session) {
2289 pr_err("handle_lease got lease but no session mds%d\n", mds);
2290 return;
2291 }
2292
2293 mutex_lock(&session->s_mutex);
2294 session->s_seq++;
2295
2296 /* lookup inode */
2297 inode = ceph_find_inode(sb, vino);
2298 dout("handle_lease '%s', mask %d, ino %llx %p\n",
2299 ceph_lease_op_name(h->action), mask, vino.ino, inode);
2300 if (inode == NULL) {
2301 dout("handle_lease no inode %llx\n", vino.ino);
2302 goto release;
2303 }
2304 ci = ceph_inode(inode);
2305
2306 /* dentry */
2307 parent = d_find_alias(inode);
2308 if (!parent) {
2309 dout("no parent dentry on inode %p\n", inode);
2310 WARN_ON(1);
2311 goto release; /* hrm... */
2312 }
2313 dname.hash = full_name_hash(dname.name, dname.len);
2314 dentry = d_lookup(parent, &dname);
2315 dput(parent);
2316 if (!dentry)
2317 goto release;
2318
2319 spin_lock(&dentry->d_lock);
2320 di = ceph_dentry(dentry);
2321 switch (h->action) {
2322 case CEPH_MDS_LEASE_REVOKE:
2323 if (di && di->lease_session == session) {
2324 h->seq = cpu_to_le32(di->lease_seq);
2325 __ceph_mdsc_drop_dentry_lease(dentry);
2326 }
2327 release = 1;
2328 break;
2329
2330 case CEPH_MDS_LEASE_RENEW:
2331 if (di && di->lease_session == session &&
2332 di->lease_gen == session->s_cap_gen &&
2333 di->lease_renew_from &&
2334 di->lease_renew_after == 0) {
2335 unsigned long duration =
2336 le32_to_cpu(h->duration_ms) * HZ / 1000;
2337
2338 di->lease_seq = le32_to_cpu(h->seq);
2339 dentry->d_time = di->lease_renew_from + duration;
2340 di->lease_renew_after = di->lease_renew_from +
2341 (duration >> 1);
2342 di->lease_renew_from = 0;
2343 }
2344 break;
2345 }
2346 spin_unlock(&dentry->d_lock);
2347 dput(dentry);
2348
2349 if (!release)
2350 goto out;
2351
2352release:
2353 /* let's just reuse the same message */
2354 h->action = CEPH_MDS_LEASE_REVOKE_ACK;
2355 ceph_msg_get(msg);
2356 ceph_con_send(&session->s_con, msg);
2357
2358out:
2359 iput(inode);
2360 mutex_unlock(&session->s_mutex);
2361 ceph_put_mds_session(session);
2362 return;
2363
2364bad:
2365 pr_err("corrupt lease message\n");
9ec7cab1 2366 ceph_msg_dump(msg);
2f2dc053
SW
2367}
2368
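/*
 * Build and send a CEPH_MSG_CLIENT_LEASE message for @dentry on
 * @inode to the given mds session.
 */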
2369void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
2370 struct inode *inode,
2371 struct dentry *dentry, char action,
2372 u32 seq)
2373{
2374 struct ceph_msg *msg;
2375 struct ceph_mds_lease *lease;
2376 int len = sizeof(*lease) + sizeof(u32);
2377 int dnamelen = 0;
2378
2379 dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
2380 inode, dentry, ceph_lease_op_name(action), session->s_mds);
2381 dnamelen = dentry->d_name.len;
2382 len += dnamelen;
2383
2384 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
2385 if (IS_ERR(msg))
2386 return;
2387 lease = msg->front.iov_base;
2388 lease->action = action;
2389 lease->mask = cpu_to_le16(CEPH_LOCK_DN);
2390 lease->ino = cpu_to_le64(ceph_vino(inode).ino);
2391 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
2392 lease->seq = cpu_to_le32(seq);
2393 put_unaligned_le32(dnamelen, lease + 1);
2394 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
2395
2396 /*
2397 * if this is a preemptive lease RELEASE, no need to
2398 * flush request stream, since the actual request will
2399 * soon follow.
2400 */
2401 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
2402
2403 ceph_con_send(&session->s_con, msg);
2404}
2405
2406/*
2407 * Preemptively release a lease we expect to invalidate anyway.
2408 * Pass both @inode and @dentry; only CEPH_LOCK_DN leases are handled.
2409 */
2410void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
2411 struct dentry *dentry, int mask)
2412{
2413 struct ceph_dentry_info *di;
2414 struct ceph_mds_session *session;
2415 u32 seq;
2416
2417 BUG_ON(inode == NULL);
2418 BUG_ON(dentry == NULL);
2419 BUG_ON(mask != CEPH_LOCK_DN);
2420
2421 /* is dentry lease valid? */
2422 spin_lock(&dentry->d_lock);
2423 di = ceph_dentry(dentry);
2424 if (!di || !di->lease_session ||
2425 di->lease_session->s_mds < 0 ||
2426 di->lease_gen != di->lease_session->s_cap_gen ||
2427 !time_before(jiffies, dentry->d_time)) {
2428 dout("lease_release inode %p dentry %p -- "
2429 "no lease on %d\n",
2430 inode, dentry, mask);
2431 spin_unlock(&dentry->d_lock);
2432 return;
2433 }
2434
2435 /* we do have a lease on this dentry; note mds and seq */
2436 session = ceph_get_mds_session(di->lease_session);
2437 seq = di->lease_seq;
2438 __ceph_mdsc_drop_dentry_lease(dentry);
2439 spin_unlock(&dentry->d_lock);
2440
2441 dout("lease_release inode %p dentry %p mask %d to mds%d\n",
2442 inode, dentry, mask, session->s_mds);
2443 ceph_mdsc_lease_send_msg(session, inode, dentry,
2444 CEPH_MDS_LEASE_RELEASE, seq);
2445 ceph_put_mds_session(session);
2446}
2447
2448/*
2449 * drop all leases (and dentry refs) in preparation for umount
2450 */
2451static void drop_leases(struct ceph_mds_client *mdsc)
2452{
2453 int i;
2454
2455 dout("drop_leases\n");
2456 mutex_lock(&mdsc->mutex);
2457 for (i = 0; i < mdsc->max_sessions; i++) {
2458 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2459 if (!s)
2460 continue;
2461 mutex_unlock(&mdsc->mutex);
2462 mutex_lock(&s->s_mutex);
2463 mutex_unlock(&s->s_mutex);
2464 ceph_put_mds_session(s);
2465 mutex_lock(&mdsc->mutex);
2466 }
2467 mutex_unlock(&mdsc->mutex);
2468}
2469
2470
2471
2472/*
2473 * delayed work -- periodically trim expired leases, renew caps with mds
2474 */
2475static void schedule_delayed(struct ceph_mds_client *mdsc)
2476{
2477 int delay = 5;
2478 unsigned hz = round_jiffies_relative(HZ * delay);
2479 schedule_delayed_work(&mdsc->delayed_work, hz);
2480}
2481
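/*
 * The periodic work itself: flush delayed caps, renew caps with each
 * mds once a quarter of the session timeout has passed (otherwise just
 * send a keepalive), push out queued cap releases, and re-arm the timer.
 */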
2482static void delayed_work(struct work_struct *work)
2483{
2484 int i;
2485 struct ceph_mds_client *mdsc =
2486 container_of(work, struct ceph_mds_client, delayed_work.work);
2487 int renew_interval;
2488 int renew_caps;
2489
2490 dout("mdsc delayed_work\n");
afcdaea3 2491 ceph_check_delayed_caps(mdsc);
2f2dc053
SW
2492
2493 mutex_lock(&mdsc->mutex);
2494 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
2495 renew_caps = time_after_eq(jiffies, HZ*renew_interval +
2496 mdsc->last_renew_caps);
2497 if (renew_caps)
2498 mdsc->last_renew_caps = jiffies;
2499
2500 for (i = 0; i < mdsc->max_sessions; i++) {
2501 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2502 if (s == NULL)
2503 continue;
2504 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
2505 dout("resending session close request for mds%d\n",
2506 s->s_mds);
2507 request_close_session(mdsc, s);
2508 ceph_put_mds_session(s);
2509 continue;
2510 }
2511 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
2512 if (s->s_state == CEPH_MDS_SESSION_OPEN) {
2513 s->s_state = CEPH_MDS_SESSION_HUNG;
2514 pr_info("mds%d hung\n", s->s_mds);
2515 }
2516 }
2517 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
2518 /* this mds is failed or recovering, just wait */
2519 ceph_put_mds_session(s);
2520 continue;
2521 }
2522 mutex_unlock(&mdsc->mutex);
2523
2524 mutex_lock(&s->s_mutex);
2525 if (renew_caps)
2526 send_renew_caps(mdsc, s);
2527 else
2528 ceph_con_keepalive(&s->s_con);
2529 add_cap_releases(mdsc, s, -1);
2530 send_cap_releases(mdsc, s);
2531 mutex_unlock(&s->s_mutex);
2532 ceph_put_mds_session(s);
2533
2534 mutex_lock(&mdsc->mutex);
2535 }
2536 mutex_unlock(&mdsc->mutex);
2537
2538 schedule_delayed(mdsc);
2539}
2540
2541
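/*
 * Set up mds client state for @client: locks, the request and snap
 * realm radix trees, cap flushing bookkeeping, the dentry LRU, and the
 * delayed work item.
 */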
5f44f142 2542int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2f2dc053
SW
2543{
2544 mdsc->client = client;
2545 mutex_init(&mdsc->mutex);
2546 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2547 init_completion(&mdsc->safe_umount_waiters);
2548 init_completion(&mdsc->session_close_waiters);
2549 INIT_LIST_HEAD(&mdsc->waiting_for_map);
2550 mdsc->sessions = NULL;
2551 mdsc->max_sessions = 0;
2552 mdsc->stopping = 0;
2553 init_rwsem(&mdsc->snap_rwsem);
2554 INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
2555 INIT_LIST_HEAD(&mdsc->snap_empty);
2556 spin_lock_init(&mdsc->snap_empty_lock);
2557 mdsc->last_tid = 0;
2558 INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
2559 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
2560 mdsc->last_renew_caps = jiffies;
2561 INIT_LIST_HEAD(&mdsc->cap_delay_list);
2562 spin_lock_init(&mdsc->cap_delay_lock);
2563 INIT_LIST_HEAD(&mdsc->snap_flush_list);
2564 spin_lock_init(&mdsc->snap_flush_lock);
2565 mdsc->cap_flush_seq = 0;
2566 INIT_LIST_HEAD(&mdsc->cap_dirty);
2567 mdsc->num_cap_flushing = 0;
2568 spin_lock_init(&mdsc->cap_dirty_lock);
2569 init_waitqueue_head(&mdsc->cap_flushing_wq);
2570 spin_lock_init(&mdsc->dentry_lru_lock);
2571 INIT_LIST_HEAD(&mdsc->dentry_lru);
5f44f142 2572 return 0;
2f2dc053
SW
2573}
2574
2575/*
2576 * Wait for safe replies on open mds requests. If we time out, drop
2577 * all requests from the tree to avoid dangling dentry refs.
2578 */
2579static void wait_requests(struct ceph_mds_client *mdsc)
2580{
2581 struct ceph_mds_request *req;
2582 struct ceph_client *client = mdsc->client;
2583
2584 mutex_lock(&mdsc->mutex);
2585 if (__get_oldest_tid(mdsc)) {
2586 mutex_unlock(&mdsc->mutex);
2587 dout("wait_requests waiting for requests\n");
2588 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
6b805185 2589 client->mount_args->mount_timeout * HZ);
2f2dc053
SW
2590 mutex_lock(&mdsc->mutex);
2591
2592 /* tear down remaining requests */
2593 while (radix_tree_gang_lookup(&mdsc->request_tree,
2594 (void **)&req, 0, 1)) {
2595 dout("wait_requests timed out on tid %llu\n",
2596 req->r_tid);
2597 radix_tree_delete(&mdsc->request_tree, req->r_tid);
2598 ceph_mdsc_put_request(req);
2599 }
2600 }
2601 mutex_unlock(&mdsc->mutex);
2602 dout("wait_requests done\n");
2603}
2604
2605/*
2606 * called before mount is ro, and before dentries are torn down.
2607 * (hmm, does this still race with new lookups?)
2608 */
2609void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
2610{
2611 dout("pre_umount\n");
2612 mdsc->stopping = 1;
2613
2614 drop_leases(mdsc);
afcdaea3 2615 ceph_flush_dirty_caps(mdsc);
2f2dc053
SW
2616 wait_requests(mdsc);
2617}
2618
2619/*
2620 * wait for all write mds requests to flush.
2621 */
2622static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
2623{
2624 struct ceph_mds_request *req;
2625 u64 next_tid = 0;
2626 int got;
2627
2628 mutex_lock(&mdsc->mutex);
2629 dout("wait_unsafe_requests want %lld\n", want_tid);
2630 while (1) {
2631 got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
2632 next_tid, 1);
2633 if (!got)
2634 break;
2635 if (req->r_tid > want_tid)
2636 break;
2637
2638 next_tid = req->r_tid + 1;
2639 if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
2640 continue; /* not a write op */
2641
2642 ceph_mdsc_get_request(req);
2643 mutex_unlock(&mdsc->mutex);
2644 dout("wait_unsafe_requests wait on %llu (want %llu)\n",
2645 req->r_tid, want_tid);
2646 wait_for_completion(&req->r_safe_completion);
2647 mutex_lock(&mdsc->mutex);
2648 ceph_mdsc_put_request(req);
2649 }
2650 mutex_unlock(&mdsc->mutex);
2651 dout("wait_unsafe_requests done\n");
2652}
2653
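/*
 * Flush dirty caps, then wait for all write requests and cap flushes
 * issued up to this point to be acknowledged.
 */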
2654void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
2655{
2656 u64 want_tid, want_flush;
2657
2658 dout("sync\n");
2659 mutex_lock(&mdsc->mutex);
2660 want_tid = mdsc->last_tid;
2661 want_flush = mdsc->cap_flush_seq;
2662 mutex_unlock(&mdsc->mutex);
2663 dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
2664
afcdaea3 2665 ceph_flush_dirty_caps(mdsc);
2f2dc053
SW
2666
2667 wait_unsafe_requests(mdsc, want_tid);
2668 wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
2669}
2670
2671
2672/*
2673 * called after sb is ro.
2674 */
2675void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
2676{
2677 struct ceph_mds_session *session;
2678 int i;
2679 int n;
2680 struct ceph_client *client = mdsc->client;
6b805185 2681 unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
2f2dc053
SW
2682
2683 dout("close_sessions\n");
2684
2685 mutex_lock(&mdsc->mutex);
2686
2687 /* close sessions */
2688 started = jiffies;
2689 while (time_before(jiffies, started + timeout)) {
2690 dout("closing sessions\n");
2691 n = 0;
2692 for (i = 0; i < mdsc->max_sessions; i++) {
2693 session = __ceph_lookup_mds_session(mdsc, i);
2694 if (!session)
2695 continue;
2696 mutex_unlock(&mdsc->mutex);
2697 mutex_lock(&session->s_mutex);
2698 __close_session(mdsc, session);
2699 mutex_unlock(&session->s_mutex);
2700 ceph_put_mds_session(session);
2701 mutex_lock(&mdsc->mutex);
2702 n++;
2703 }
2704 if (n == 0)
2705 break;
2706
2707 if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
2708 break;
2709
2710 dout("waiting for sessions to close\n");
2711 mutex_unlock(&mdsc->mutex);
2712 wait_for_completion_timeout(&mdsc->session_close_waiters,
2713 timeout);
2714 mutex_lock(&mdsc->mutex);
2715 }
2716
2717 /* tear down remaining sessions */
2718 for (i = 0; i < mdsc->max_sessions; i++) {
2719 if (mdsc->sessions[i]) {
2720 session = get_session(mdsc->sessions[i]);
42ce56e5 2721 unregister_session(mdsc, session);
2f2dc053
SW
2722 mutex_unlock(&mdsc->mutex);
2723 mutex_lock(&session->s_mutex);
2724 remove_session_caps(session);
2725 mutex_unlock(&session->s_mutex);
2726 ceph_put_mds_session(session);
2727 mutex_lock(&mdsc->mutex);
2728 }
2729 }
2730
2731 WARN_ON(!list_empty(&mdsc->cap_delay_list));
2732
2733 mutex_unlock(&mdsc->mutex);
2734
2735 ceph_cleanup_empty_realms(mdsc);
2736
2737 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
2738
2739 dout("stopped\n");
2740}
2741
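/*
 * Final teardown: cancel the delayed work and free the mdsmap and
 * session array.
 */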
2742void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
2743{
2744 dout("stop\n");
2745 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
2746 if (mdsc->mdsmap)
2747 ceph_mdsmap_destroy(mdsc->mdsmap);
2748 kfree(mdsc->sessions);
2749}
2750
2751
2752/*
2753 * handle mds map update.
2754 */
2755void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
2756{
2757 u32 epoch;
2758 u32 maplen;
2759 void *p = msg->front.iov_base;
2760 void *end = p + msg->front.iov_len;
2761 struct ceph_mdsmap *newmap, *oldmap;
2762 struct ceph_fsid fsid;
2763 int err = -EINVAL;
2764
2765 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
2766 ceph_decode_copy(&p, &fsid, sizeof(fsid));
0743304d
SW
2767 if (ceph_check_fsid(mdsc->client, &fsid) < 0)
2768 return;
c89136ea
SW
2769 epoch = ceph_decode_32(&p);
2770 maplen = ceph_decode_32(&p);
2f2dc053
SW
2771 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
2772
2773 /* do we need it? */
2774 ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
2775 mutex_lock(&mdsc->mutex);
2776 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
2777 dout("handle_map epoch %u <= our %u\n",
2778 epoch, mdsc->mdsmap->m_epoch);
2779 mutex_unlock(&mdsc->mutex);
2780 return;
2781 }
2782
2783 newmap = ceph_mdsmap_decode(&p, end);
2784 if (IS_ERR(newmap)) {
2785 err = PTR_ERR(newmap);
2786 goto bad_unlock;
2787 }
2788
2789 /* swap into place */
2790 if (mdsc->mdsmap) {
2791 oldmap = mdsc->mdsmap;
2792 mdsc->mdsmap = newmap;
2793 check_new_map(mdsc, newmap, oldmap);
2794 ceph_mdsmap_destroy(oldmap);
2795 } else {
2796 mdsc->mdsmap = newmap; /* first mds map */
2797 }
2798 mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
2799
2800 __wake_requests(mdsc, &mdsc->waiting_for_map);
2801
2802 mutex_unlock(&mdsc->mutex);
2803 schedule_delayed(mdsc);
2804 return;
2805
2806bad_unlock:
2807 mutex_unlock(&mdsc->mutex);
2808bad:
2809 pr_err("error decoding mdsmap %d\n", err);
2810 return;
2811}
2812
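/*
 * Connection reference callbacks: the messenger's ceph_connection
 * pins its owning mds session via s_ref.
 */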
2813static struct ceph_connection *con_get(struct ceph_connection *con)
2814{
2815 struct ceph_mds_session *s = con->private;
2816
2817 if (get_session(s)) {
2818 dout("mdsc con_get %p %d -> %d\n", s,
2819 atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
2820 return con;
2821 }
2822 dout("mdsc con_get %p FAIL\n", s);
2823 return NULL;
2824}
2825
2826static void con_put(struct ceph_connection *con)
2827{
2828 struct ceph_mds_session *s = con->private;
2829
2830 dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
2831 atomic_read(&s->s_ref) - 1);
2832 ceph_put_mds_session(s);
2833}
2834
2835/*
2836 * if the client is unresponsive for long enough, the mds will kill
2837 * the session entirely; we then see a reset on the connection.
2838 */
2839static void peer_reset(struct ceph_connection *con)
2840{
2841 struct ceph_mds_session *s = con->private;
2842
2843 pr_err("mds%d gave us the boot. IMPLEMENT RECONNECT.\n",
2844 s->s_mds);
2845}
2846
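/*
 * Dispatch an incoming message from the mds to the appropriate handler
 * based on its type; unknown types are logged and dropped.
 */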
2847static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
2848{
2849 struct ceph_mds_session *s = con->private;
2850 struct ceph_mds_client *mdsc = s->s_mdsc;
2851 int type = le16_to_cpu(msg->hdr.type);
2852
2853 switch (type) {
2854 case CEPH_MSG_MDS_MAP:
2855 ceph_mdsc_handle_map(mdsc, msg);
2856 break;
2857 case CEPH_MSG_CLIENT_SESSION:
2858 handle_session(s, msg);
2859 break;
2860 case CEPH_MSG_CLIENT_REPLY:
2861 handle_reply(s, msg);
2862 break;
2863 case CEPH_MSG_CLIENT_REQUEST_FORWARD:
2864 handle_forward(mdsc, msg);
2865 break;
2866 case CEPH_MSG_CLIENT_CAPS:
2867 ceph_handle_caps(s, msg);
2868 break;
2869 case CEPH_MSG_CLIENT_SNAP:
2870 ceph_handle_snap(mdsc, msg);
2871 break;
2872 case CEPH_MSG_CLIENT_LEASE:
2873 handle_lease(mdsc, msg);
2874 break;
2875
2876 default:
2877 pr_err("received unknown message type %d %s\n", type,
2878 ceph_msg_type_name(type));
2879 }
2880 ceph_msg_put(msg);
2881}
2882
4e7a5dcd
SW
2883/*
2884 * authentication
2885 */
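/*
 * Build (or, if @force_new, rebuild) this session's authorizer and
 * return the buffers the messenger should use for the handshake.
 */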
2886static int get_authorizer(struct ceph_connection *con,
2887 void **buf, int *len, int *proto,
2888 void **reply_buf, int *reply_len, int force_new)
2889{
2890 struct ceph_mds_session *s = con->private;
2891 struct ceph_mds_client *mdsc = s->s_mdsc;
2892 struct ceph_auth_client *ac = mdsc->client->monc.auth;
2893 int ret = 0;
2894
2895 if (force_new && s->s_authorizer) {
2896 ac->ops->destroy_authorizer(ac, s->s_authorizer);
2897 s->s_authorizer = NULL;
2898 }
2899 if (s->s_authorizer == NULL) {
2900 if (ac->ops->create_authorizer) {
2901 ret = ac->ops->create_authorizer(
2902 ac, CEPH_ENTITY_TYPE_MDS,
2903 &s->s_authorizer,
2904 &s->s_authorizer_buf,
2905 &s->s_authorizer_buf_len,
2906 &s->s_authorizer_reply_buf,
2907 &s->s_authorizer_reply_buf_len);
2908 if (ret)
2909 return ret;
2910 }
2911 }
2912
2913 *proto = ac->protocol;
2914 *buf = s->s_authorizer_buf;
2915 *len = s->s_authorizer_buf_len;
2916 *reply_buf = s->s_authorizer_reply_buf;
2917 *reply_len = s->s_authorizer_reply_buf_len;
2918 return 0;
2919}
2920
2921
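/*
 * Verify the authorizer reply from the handshake by delegating to the
 * auth client's verify_authorizer_reply method.
 */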
2922static int verify_authorizer_reply(struct ceph_connection *con, int len)
2923{
2924 struct ceph_mds_session *s = con->private;
2925 struct ceph_mds_client *mdsc = s->s_mdsc;
2926 struct ceph_auth_client *ac = mdsc->client->monc.auth;
2927
2928 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
2929}
2930
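/*
 * Connection operations table: wires mds sessions into the generic
 * messenger layer.
 */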
2f2dc053
SW
2931const static struct ceph_connection_operations mds_con_ops = {
2932 .get = con_get,
2933 .put = con_put,
2934 .dispatch = dispatch,
4e7a5dcd
SW
2935 .get_authorizer = get_authorizer,
2936 .verify_authorizer_reply = verify_authorizer_reply,
2f2dc053
SW
2937 .peer_reset = peer_reset,
2938 .alloc_msg = ceph_alloc_msg,
2939 .alloc_middle = ceph_alloc_middle,
2940};
2941
2942
2943
2944
2945/* eof */