fs/ceph/mds_client.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/fs.h>
4 #include <linux/wait.h>
5 #include <linux/slab.h>
6 #include <linux/sched.h>
7 #include <linux/debugfs.h>
8 #include <linux/seq_file.h>
9
10 #include "super.h"
11 #include "mds_client.h"
12
13 #include <linux/ceph/messenger.h>
14 #include <linux/ceph/decode.h>
15 #include <linux/ceph/pagelist.h>
16 #include <linux/ceph/auth.h>
17 #include <linux/ceph/debugfs.h>
18
19 /*
20 * A cluster of MDS (metadata server) daemons is responsible for
21 * managing the file system namespace (the directory hierarchy and
22 * inodes) and for coordinating shared access to storage. Metadata is
23 * partitioned hierarchically across a number of servers, and that
24 * partition varies over time as the cluster adjusts the distribution
25 * in order to balance load.
26 *
27 * The MDS client is primarily responsible for managing synchronous
28 * metadata requests for operations like open, unlink, and so forth.
29 * If there is an MDS failure, we find out about it when we (possibly
30 * request and) receive a new MDS map, and can resubmit affected
31 * requests.
32 *
33 * For the most part, though, we take advantage of a lossless
34 * communications channel to the MDS, and do not need to worry about
35 * timing out or resubmitting requests.
36 *
37 * We maintain a stateful "session" with each MDS we interact with.
38 * Within each session, we send periodic heartbeat messages to ensure
39 * any capabilities or leases we have been issued remain valid. If
40 * the session times out and goes stale, our leases and capabilities
41 * are no longer valid.
42 */
43
44 struct ceph_reconnect_state {
45 struct ceph_pagelist *pagelist;
46 bool flock;
47 };
48
49 static void __wake_requests(struct ceph_mds_client *mdsc,
50 struct list_head *head);
51
52 static const struct ceph_connection_operations mds_con_ops;
53
54
55 /*
56 * mds reply parsing
57 */
58
59 /*
60 * parse individual inode info
61 */
62 static int parse_reply_info_in(void **p, void *end,
63 struct ceph_mds_reply_info_in *info)
64 {
65 int err = -EIO;
66
67 info->in = *p;
68 *p += sizeof(struct ceph_mds_reply_inode) +
69 sizeof(*info->in->fragtree.splits) *
70 le32_to_cpu(info->in->fragtree.nsplits);
71
72 ceph_decode_32_safe(p, end, info->symlink_len, bad);
73 ceph_decode_need(p, end, info->symlink_len, bad);
74 info->symlink = *p;
75 *p += info->symlink_len;
76
77 ceph_decode_32_safe(p, end, info->xattr_len, bad);
78 ceph_decode_need(p, end, info->xattr_len, bad);
79 info->xattr_data = *p;
80 *p += info->xattr_len;
81 return 0;
82 bad:
83 return err;
84 }
85
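/*
 * For reference, parse_reply_info_in() above consumes, in order:
 *   - struct ceph_mds_reply_inode, plus nsplits fragtree split entries
 *   - u32 symlink_len, then that many bytes of symlink target
 *   - u32 xattr_len, then that many bytes of xattr blob
 * The pointers stashed in ceph_mds_reply_info_in refer into the message
 * buffer itself; nothing is copied here.
 */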
86 /*
87 * parse a normal reply, which may contain a (dir+)dentry and/or a
88 * target inode.
89 */
90 static int parse_reply_info_trace(void **p, void *end,
91 struct ceph_mds_reply_info_parsed *info)
92 {
93 int err;
94
95 if (info->head->is_dentry) {
96 err = parse_reply_info_in(p, end, &info->diri);
97 if (err < 0)
98 goto out_bad;
99
100 if (unlikely(*p + sizeof(*info->dirfrag) > end))
101 goto bad;
102 info->dirfrag = *p;
103 *p += sizeof(*info->dirfrag) +
104 sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
105 if (unlikely(*p > end))
106 goto bad;
107
108 ceph_decode_32_safe(p, end, info->dname_len, bad);
109 ceph_decode_need(p, end, info->dname_len, bad);
110 info->dname = *p;
111 *p += info->dname_len;
112 info->dlease = *p;
113 *p += sizeof(*info->dlease);
114 }
115
116 if (info->head->is_target) {
117 err = parse_reply_info_in(p, end, &info->targeti);
118 if (err < 0)
119 goto out_bad;
120 }
121
122 if (unlikely(*p != end))
123 goto bad;
124 return 0;
125
126 bad:
127 err = -EIO;
128 out_bad:
129 pr_err("problem parsing mds trace %d\n", err);
130 return err;
131 }
132
133 /*
134 * parse readdir results
135 */
136 static int parse_reply_info_dir(void **p, void *end,
137 struct ceph_mds_reply_info_parsed *info)
138 {
139 u32 num, i = 0;
140 int err;
141
142 info->dir_dir = *p;
143 if (*p + sizeof(*info->dir_dir) > end)
144 goto bad;
145 *p += sizeof(*info->dir_dir) +
146 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
147 if (*p > end)
148 goto bad;
149
150 ceph_decode_need(p, end, sizeof(num) + 2, bad);
151 num = ceph_decode_32(p);
152 info->dir_end = ceph_decode_8(p);
153 info->dir_complete = ceph_decode_8(p);
154 if (num == 0)
155 goto done;
156
157 /* alloc large array */
158 info->dir_nr = num;
159 info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
160 sizeof(*info->dir_dname) +
161 sizeof(*info->dir_dname_len) +
162 sizeof(*info->dir_dlease),
163 GFP_NOFS);
164 if (info->dir_in == NULL) {
165 err = -ENOMEM;
166 goto out_bad;
167 }
168 info->dir_dname = (void *)(info->dir_in + num);
169 info->dir_dname_len = (void *)(info->dir_dname + num);
170 info->dir_dlease = (void *)(info->dir_dname_len + num);
171
172 while (num) {
173 /* dentry */
174 ceph_decode_need(p, end, sizeof(u32)*2, bad);
175 info->dir_dname_len[i] = ceph_decode_32(p);
176 ceph_decode_need(p, end, info->dir_dname_len[i], bad);
177 info->dir_dname[i] = *p;
178 *p += info->dir_dname_len[i];
179 dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
180 info->dir_dname[i]);
181 info->dir_dlease[i] = *p;
182 *p += sizeof(struct ceph_mds_reply_lease);
183
184 /* inode */
185 err = parse_reply_info_in(p, end, &info->dir_in[i]);
186 if (err < 0)
187 goto out_bad;
188 i++;
189 num--;
190 }
191
192 done:
193 if (*p != end)
194 goto bad;
195 return 0;
196
197 bad:
198 err = -EIO;
199 out_bad:
200 pr_err("problem parsing dir contents %d\n", err);
201 return err;
202 }
203
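/*
 * The overall reply payload handled by parse_reply_info() below is,
 * roughly:
 *   struct ceph_mds_reply_head
 *   u32 len + trace blob   (parse_reply_info_trace)
 *   u32 len + dir blob     (parse_reply_info_dir)
 *   u32 len + snap blob    (kept as-is and later fed to ceph_update_snap_trace)
 */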
204 /*
205 * parse entire mds reply
206 */
207 static int parse_reply_info(struct ceph_msg *msg,
208 struct ceph_mds_reply_info_parsed *info)
209 {
210 void *p, *end;
211 u32 len;
212 int err;
213
214 info->head = msg->front.iov_base;
215 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
216 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
217
218 /* trace */
219 ceph_decode_32_safe(&p, end, len, bad);
220 if (len > 0) {
221 err = parse_reply_info_trace(&p, p+len, info);
222 if (err < 0)
223 goto out_bad;
224 }
225
226 /* dir content */
227 ceph_decode_32_safe(&p, end, len, bad);
228 if (len > 0) {
229 err = parse_reply_info_dir(&p, p+len, info);
230 if (err < 0)
231 goto out_bad;
232 }
233
234 /* snap blob */
235 ceph_decode_32_safe(&p, end, len, bad);
236 info->snapblob_len = len;
237 info->snapblob = p;
238 p += len;
239
240 if (p != end)
241 goto bad;
242 return 0;
243
244 bad:
245 err = -EIO;
246 out_bad:
247 pr_err("mds parse_reply err %d\n", err);
248 return err;
249 }
250
251 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
252 {
253 kfree(info->dir_in);
254 }
255
256
257 /*
258 * sessions
259 */
260 static const char *session_state_name(int s)
261 {
262 switch (s) {
263 case CEPH_MDS_SESSION_NEW: return "new";
264 case CEPH_MDS_SESSION_OPENING: return "opening";
265 case CEPH_MDS_SESSION_OPEN: return "open";
266 case CEPH_MDS_SESSION_HUNG: return "hung";
267 case CEPH_MDS_SESSION_CLOSING: return "closing";
268 case CEPH_MDS_SESSION_RESTARTING: return "restarting";
269 case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
270 default: return "???";
271 }
272 }
273
274 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
275 {
276 if (atomic_inc_not_zero(&s->s_ref)) {
277 dout("mdsc get_session %p %d -> %d\n", s,
278 atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
279 return s;
280 } else {
281 dout("mdsc get_session %p 0 -- FAIL", s);
282 return NULL;
283 }
284 }
285
286 void ceph_put_mds_session(struct ceph_mds_session *s)
287 {
288 dout("mdsc put_session %p %d -> %d\n", s,
289 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
290 if (atomic_dec_and_test(&s->s_ref)) {
291 if (s->s_authorizer)
292 s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
293 s->s_mdsc->fsc->client->monc.auth,
294 s->s_authorizer);
295 kfree(s);
296 }
297 }
298
299 /*
300 * called under mdsc->mutex
301 */
302 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
303 int mds)
304 {
305 struct ceph_mds_session *session;
306
307 if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
308 return NULL;
309 session = mdsc->sessions[mds];
310 dout("lookup_mds_session %p %d\n", session,
311 atomic_read(&session->s_ref));
312 get_session(session);
313 return session;
314 }
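/*
 * Callers (e.g. __do_request() below) look the session up under
 * mdsc->mutex and later drop the returned reference with
 * ceph_put_mds_session().  Illustrative pattern:
 *
 *	session = __ceph_lookup_mds_session(mdsc, mds);
 *	if (session) {
 *		... use session ...
 *		ceph_put_mds_session(session);
 *	}
 */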
315
316 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
317 {
318 if (mds >= mdsc->max_sessions)
319 return false;
320 return mdsc->sessions[mds];
321 }
322
323 static int __verify_registered_session(struct ceph_mds_client *mdsc,
324 struct ceph_mds_session *s)
325 {
326 if (s->s_mds >= mdsc->max_sessions ||
327 mdsc->sessions[s->s_mds] != s)
328 return -ENOENT;
329 return 0;
330 }
331
332 /*
333 * create+register a new session for given mds.
334 * called under mdsc->mutex.
335 */
336 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
337 int mds)
338 {
339 struct ceph_mds_session *s;
340
341 s = kzalloc(sizeof(*s), GFP_NOFS);
342 if (!s)
343 return ERR_PTR(-ENOMEM);
344 s->s_mdsc = mdsc;
345 s->s_mds = mds;
346 s->s_state = CEPH_MDS_SESSION_NEW;
347 s->s_ttl = 0;
348 s->s_seq = 0;
349 mutex_init(&s->s_mutex);
350
351 ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
352 s->s_con.private = s;
353 s->s_con.ops = &mds_con_ops;
354 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
355 s->s_con.peer_name.num = cpu_to_le64(mds);
356
357 spin_lock_init(&s->s_cap_lock);
358 s->s_cap_gen = 0;
359 s->s_cap_ttl = 0;
360 s->s_renew_requested = 0;
361 s->s_renew_seq = 0;
362 INIT_LIST_HEAD(&s->s_caps);
363 s->s_nr_caps = 0;
364 s->s_trim_caps = 0;
365 atomic_set(&s->s_ref, 1);
366 INIT_LIST_HEAD(&s->s_waiting);
367 INIT_LIST_HEAD(&s->s_unsafe);
368 s->s_num_cap_releases = 0;
369 s->s_cap_iterator = NULL;
370 INIT_LIST_HEAD(&s->s_cap_releases);
371 INIT_LIST_HEAD(&s->s_cap_releases_done);
372 INIT_LIST_HEAD(&s->s_cap_flushing);
373 INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
374
375 dout("register_session mds%d\n", mds);
376 if (mds >= mdsc->max_sessions) {
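/* e.g. registering mds5 grows the array to 1 << get_count_order(6) == 8 slots */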
377 int newmax = 1 << get_count_order(mds+1);
378 struct ceph_mds_session **sa;
379
380 dout("register_session realloc to %d\n", newmax);
381 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
382 if (sa == NULL)
383 goto fail_realloc;
384 if (mdsc->sessions) {
385 memcpy(sa, mdsc->sessions,
386 mdsc->max_sessions * sizeof(void *));
387 kfree(mdsc->sessions);
388 }
389 mdsc->sessions = sa;
390 mdsc->max_sessions = newmax;
391 }
392 mdsc->sessions[mds] = s;
393 atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
394
395 ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
396
397 return s;
398
399 fail_realloc:
400 kfree(s);
401 return ERR_PTR(-ENOMEM);
402 }
403
404 /*
405 * called under mdsc->mutex
406 */
407 static void __unregister_session(struct ceph_mds_client *mdsc,
408 struct ceph_mds_session *s)
409 {
410 dout("__unregister_session mds%d %p\n", s->s_mds, s);
411 BUG_ON(mdsc->sessions[s->s_mds] != s);
412 mdsc->sessions[s->s_mds] = NULL;
413 ceph_con_close(&s->s_con);
414 ceph_put_mds_session(s);
415 }
416
417 /*
418 * drop session refs in request.
419 *
420 * should be last request ref, or hold mdsc->mutex
421 */
422 static void put_request_session(struct ceph_mds_request *req)
423 {
424 if (req->r_session) {
425 ceph_put_mds_session(req->r_session);
426 req->r_session = NULL;
427 }
428 }
429
430 void ceph_mdsc_release_request(struct kref *kref)
431 {
432 struct ceph_mds_request *req = container_of(kref,
433 struct ceph_mds_request,
434 r_kref);
435 if (req->r_request)
436 ceph_msg_put(req->r_request);
437 if (req->r_reply) {
438 ceph_msg_put(req->r_reply);
439 destroy_reply_info(&req->r_reply_info);
440 }
441 if (req->r_inode) {
442 ceph_put_cap_refs(ceph_inode(req->r_inode),
443 CEPH_CAP_PIN);
444 iput(req->r_inode);
445 }
446 if (req->r_locked_dir)
447 ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
448 CEPH_CAP_PIN);
449 if (req->r_target_inode)
450 iput(req->r_target_inode);
451 if (req->r_dentry)
452 dput(req->r_dentry);
453 if (req->r_old_dentry) {
454 ceph_put_cap_refs(
455 ceph_inode(req->r_old_dentry->d_parent->d_inode),
456 CEPH_CAP_PIN);
457 dput(req->r_old_dentry);
458 }
459 kfree(req->r_path1);
460 kfree(req->r_path2);
461 put_request_session(req);
462 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
463 kfree(req);
464 }
465
466 /*
467 * lookup request, bump ref if found.
468 *
469 * called under mdsc->mutex.
470 */
471 static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
472 u64 tid)
473 {
474 struct ceph_mds_request *req;
475 struct rb_node *n = mdsc->request_tree.rb_node;
476
477 while (n) {
478 req = rb_entry(n, struct ceph_mds_request, r_node);
479 if (tid < req->r_tid)
480 n = n->rb_left;
481 else if (tid > req->r_tid)
482 n = n->rb_right;
483 else {
484 ceph_mdsc_get_request(req);
485 return req;
486 }
487 }
488 return NULL;
489 }
490
491 static void __insert_request(struct ceph_mds_client *mdsc,
492 struct ceph_mds_request *new)
493 {
494 struct rb_node **p = &mdsc->request_tree.rb_node;
495 struct rb_node *parent = NULL;
496 struct ceph_mds_request *req = NULL;
497
498 while (*p) {
499 parent = *p;
500 req = rb_entry(parent, struct ceph_mds_request, r_node);
501 if (new->r_tid < req->r_tid)
502 p = &(*p)->rb_left;
503 else if (new->r_tid > req->r_tid)
504 p = &(*p)->rb_right;
505 else
506 BUG();
507 }
508
509 rb_link_node(&new->r_node, parent, p);
510 rb_insert_color(&new->r_node, &mdsc->request_tree);
511 }
512
513 /*
514 * Register an in-flight request, and assign a tid. Link to the directory
515 * inode we are modifying (if any).
516 *
517 * Called under mdsc->mutex.
518 */
519 static void __register_request(struct ceph_mds_client *mdsc,
520 struct ceph_mds_request *req,
521 struct inode *dir)
522 {
523 req->r_tid = ++mdsc->last_tid;
524 if (req->r_num_caps)
525 ceph_reserve_caps(mdsc, &req->r_caps_reservation,
526 req->r_num_caps);
527 dout("__register_request %p tid %lld\n", req, req->r_tid);
528 ceph_mdsc_get_request(req);
529 __insert_request(mdsc, req);
530
531 req->r_uid = current_fsuid();
532 req->r_gid = current_fsgid();
533
534 if (dir) {
535 struct ceph_inode_info *ci = ceph_inode(dir);
536
537 spin_lock(&ci->i_unsafe_lock);
538 req->r_unsafe_dir = dir;
539 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
540 spin_unlock(&ci->i_unsafe_lock);
541 }
542 }
543
544 static void __unregister_request(struct ceph_mds_client *mdsc,
545 struct ceph_mds_request *req)
546 {
547 dout("__unregister_request %p tid %lld\n", req, req->r_tid);
548 rb_erase(&req->r_node, &mdsc->request_tree);
549 RB_CLEAR_NODE(&req->r_node);
550
551 if (req->r_unsafe_dir) {
552 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
553
554 spin_lock(&ci->i_unsafe_lock);
555 list_del_init(&req->r_unsafe_dir_item);
556 spin_unlock(&ci->i_unsafe_lock);
557 }
558
559 ceph_mdsc_put_request(req);
560 }
561
562 /*
563 * Choose mds to send request to next. If there is a hint set in the
564 * request (e.g., due to a prior forward hint from the mds), use that.
565 * Otherwise, consult frag tree and/or caps to identify the
566 * appropriate mds. If all else fails, choose randomly.
567 *
568 * Called under mdsc->mutex.
569 */
570 struct dentry *get_nonsnap_parent(struct dentry *dentry)
571 {
572 while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
573 dentry = dentry->d_parent;
574 return dentry;
575 }
576
577 static int __choose_mds(struct ceph_mds_client *mdsc,
578 struct ceph_mds_request *req)
579 {
580 struct inode *inode;
581 struct ceph_inode_info *ci;
582 struct ceph_cap *cap;
583 int mode = req->r_direct_mode;
584 int mds = -1;
585 u32 hash = req->r_direct_hash;
586 bool is_hash = req->r_direct_is_hash;
587
588 /*
589 * is there a specific mds we should try? ignore hint if we have
590 * no session and the mds is not up (active or recovering).
591 */
592 if (req->r_resend_mds >= 0 &&
593 (__have_session(mdsc, req->r_resend_mds) ||
594 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
595 dout("choose_mds using resend_mds mds%d\n",
596 req->r_resend_mds);
597 return req->r_resend_mds;
598 }
599
600 if (mode == USE_RANDOM_MDS)
601 goto random;
602
603 inode = NULL;
604 if (req->r_inode) {
605 inode = req->r_inode;
606 } else if (req->r_dentry) {
607 struct inode *dir = req->r_dentry->d_parent->d_inode;
608
609 if (dir->i_sb != mdsc->fsc->sb) {
610 /* not this fs! */
611 inode = req->r_dentry->d_inode;
612 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
613 /* direct snapped/virtual snapdir requests
614 * based on parent dir inode */
615 struct dentry *dn =
616 get_nonsnap_parent(req->r_dentry->d_parent);
617 inode = dn->d_inode;
618 dout("__choose_mds using nonsnap parent %p\n", inode);
619 } else if (req->r_dentry->d_inode) {
620 /* dentry target */
621 inode = req->r_dentry->d_inode;
622 } else {
623 /* dir + name */
624 inode = dir;
625 hash = req->r_dentry->d_name.hash;
626 is_hash = true;
627 }
628 }
629
630 dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
631 (int)hash, mode);
632 if (!inode)
633 goto random;
634 ci = ceph_inode(inode);
635
636 if (is_hash && S_ISDIR(inode->i_mode)) {
637 struct ceph_inode_frag frag;
638 int found;
639
640 ceph_choose_frag(ci, hash, &frag, &found);
641 if (found) {
642 if (mode == USE_ANY_MDS && frag.ndist > 0) {
643 u8 r;
644
645 /* choose a random replica */
646 get_random_bytes(&r, 1);
647 r %= frag.ndist;
648 mds = frag.dist[r];
649 dout("choose_mds %p %llx.%llx "
650 "frag %u mds%d (%d/%d)\n",
651 inode, ceph_vinop(inode),
652 frag.frag, frag.mds,
653 (int)r, frag.ndist);
654 return mds;
655 }
656
657 /* since this file/dir wasn't known to be
658 * replicated, we want to look for the
659 * authoritative mds. */
660 mode = USE_AUTH_MDS;
661 if (frag.mds >= 0) {
662 /* choose auth mds */
663 mds = frag.mds;
664 dout("choose_mds %p %llx.%llx "
665 "frag %u mds%d (auth)\n",
666 inode, ceph_vinop(inode), frag.frag, mds);
667 return mds;
668 }
669 }
670 }
671
672 spin_lock(&inode->i_lock);
673 cap = NULL;
674 if (mode == USE_AUTH_MDS)
675 cap = ci->i_auth_cap;
676 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
677 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
678 if (!cap) {
679 spin_unlock(&inode->i_lock);
680 goto random;
681 }
682 mds = cap->session->s_mds;
683 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
684 inode, ceph_vinop(inode), mds,
685 cap == ci->i_auth_cap ? "auth " : "", cap);
686 spin_unlock(&inode->i_lock);
687 return mds;
688
689 random:
690 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
691 dout("choose_mds chose random mds%d\n", mds);
692 return mds;
693 }
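/*
 * In short, the order tried above is: an explicit resend/forward hint,
 * then dirfrag placement (random replica for USE_ANY_MDS, otherwise the
 * frag's auth mds), then the inode's caps (the auth cap for USE_AUTH_MDS,
 * otherwise any cap), and finally a random mds from the mdsmap
 * (USE_RANDOM_MDS goes straight to that last step).
 */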
694
695
696 /*
697 * session messages
698 */
699 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
700 {
701 struct ceph_msg *msg;
702 struct ceph_mds_session_head *h;
703
704 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS);
705 if (!msg) {
706 pr_err("create_session_msg ENOMEM creating msg\n");
707 return NULL;
708 }
709 h = msg->front.iov_base;
710 h->op = cpu_to_le32(op);
711 h->seq = cpu_to_le64(seq);
712 return msg;
713 }
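/*
 * The message built above is just a struct ceph_mds_session_head (a
 * __le32 op and a __le64 seq); create_session_msg() is used below for the
 * OPEN, RENEWCAPS and CLOSE session requests.
 */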
714
715 /*
716 * send session open request.
717 *
718 * called under mdsc->mutex
719 */
720 static int __open_session(struct ceph_mds_client *mdsc,
721 struct ceph_mds_session *session)
722 {
723 struct ceph_msg *msg;
724 int mstate;
725 int mds = session->s_mds;
726
727 /* wait for mds to go active? */
728 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
729 dout("open_session to mds%d (%s)\n", mds,
730 ceph_mds_state_name(mstate));
731 session->s_state = CEPH_MDS_SESSION_OPENING;
732 session->s_renew_requested = jiffies;
733
734 /* send connect message */
735 msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
736 if (!msg)
737 return -ENOMEM;
738 ceph_con_send(&session->s_con, msg);
739 return 0;
740 }
741
742 /*
743 * open sessions for any export targets for the given mds
744 *
745 * called under mdsc->mutex
746 */
747 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
748 struct ceph_mds_session *session)
749 {
750 struct ceph_mds_info *mi;
751 struct ceph_mds_session *ts;
752 int i, mds = session->s_mds;
753 int target;
754
755 if (mds >= mdsc->mdsmap->m_max_mds)
756 return;
757 mi = &mdsc->mdsmap->m_info[mds];
758 dout("open_export_target_sessions for mds%d (%d targets)\n",
759 session->s_mds, mi->num_export_targets);
760
761 for (i = 0; i < mi->num_export_targets; i++) {
762 target = mi->export_targets[i];
763 ts = __ceph_lookup_mds_session(mdsc, target);
764 if (!ts) {
765 ts = register_session(mdsc, target);
766 if (IS_ERR(ts))
767 return;
768 }
769 if (ts->s_state == CEPH_MDS_SESSION_NEW ||
770 ts->s_state == CEPH_MDS_SESSION_CLOSING)
771 __open_session(mdsc, ts);
772 else
773 dout(" mds%d target mds%d %p is %s\n", session->s_mds,
774 target, ts, session_state_name(ts->s_state));
775 ceph_put_mds_session(ts);
776 }
777 }
778
779 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
780 struct ceph_mds_session *session)
781 {
782 mutex_lock(&mdsc->mutex);
783 __open_export_target_sessions(mdsc, session);
784 mutex_unlock(&mdsc->mutex);
785 }
786
787 /*
788 * session caps
789 */
790
791 /*
792 * Free preallocated cap messages assigned to this session
793 */
794 static void cleanup_cap_releases(struct ceph_mds_session *session)
795 {
796 struct ceph_msg *msg;
797
798 spin_lock(&session->s_cap_lock);
799 while (!list_empty(&session->s_cap_releases)) {
800 msg = list_first_entry(&session->s_cap_releases,
801 struct ceph_msg, list_head);
802 list_del_init(&msg->list_head);
803 ceph_msg_put(msg);
804 }
805 while (!list_empty(&session->s_cap_releases_done)) {
806 msg = list_first_entry(&session->s_cap_releases_done,
807 struct ceph_msg, list_head);
808 list_del_init(&msg->list_head);
809 ceph_msg_put(msg);
810 }
811 spin_unlock(&session->s_cap_lock);
812 }
813
814 /*
815 * Helper to safely iterate over all caps associated with a session, with
816 * special care taken to handle a racing __ceph_remove_cap().
817 *
818 * Caller must hold session s_mutex.
819 */
820 static int iterate_session_caps(struct ceph_mds_session *session,
821 int (*cb)(struct inode *, struct ceph_cap *,
822 void *), void *arg)
823 {
824 struct list_head *p;
825 struct ceph_cap *cap;
826 struct inode *inode, *last_inode = NULL;
827 struct ceph_cap *old_cap = NULL;
828 int ret;
829
830 dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
831 spin_lock(&session->s_cap_lock);
832 p = session->s_caps.next;
833 while (p != &session->s_caps) {
834 cap = list_entry(p, struct ceph_cap, session_caps);
835 inode = igrab(&cap->ci->vfs_inode);
836 if (!inode) {
837 p = p->next;
838 continue;
839 }
840 session->s_cap_iterator = cap;
841 spin_unlock(&session->s_cap_lock);
842
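/*
 * Drop the previous iteration's inode and cap refs here, outside
 * s_cap_lock: iput() may sleep, and ceph_put_cap() takes its own
 * locks (see "put_cap it w/o locks held" below).
 */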
843 if (last_inode) {
844 iput(last_inode);
845 last_inode = NULL;
846 }
847 if (old_cap) {
848 ceph_put_cap(session->s_mdsc, old_cap);
849 old_cap = NULL;
850 }
851
852 ret = cb(inode, cap, arg);
853 last_inode = inode;
854
855 spin_lock(&session->s_cap_lock);
856 p = p->next;
857 if (cap->ci == NULL) {
858 dout("iterate_session_caps finishing cap %p removal\n",
859 cap);
860 BUG_ON(cap->session != session);
861 list_del_init(&cap->session_caps);
862 session->s_nr_caps--;
863 cap->session = NULL;
864 old_cap = cap; /* put_cap it w/o locks held */
865 }
866 if (ret < 0)
867 goto out;
868 }
869 ret = 0;
870 out:
871 session->s_cap_iterator = NULL;
872 spin_unlock(&session->s_cap_lock);
873
874 if (last_inode)
875 iput(last_inode);
876 if (old_cap)
877 ceph_put_cap(session->s_mdsc, old_cap);
878
879 return ret;
880 }
881
882 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
883 void *arg)
884 {
885 struct ceph_inode_info *ci = ceph_inode(inode);
886 int drop = 0;
887
888 dout("removing cap %p, ci is %p, inode is %p\n",
889 cap, ci, &ci->vfs_inode);
890 spin_lock(&inode->i_lock);
891 __ceph_remove_cap(cap);
892 if (!__ceph_is_any_real_caps(ci)) {
893 struct ceph_mds_client *mdsc =
894 ceph_sb_to_client(inode->i_sb)->mdsc;
895
896 spin_lock(&mdsc->cap_dirty_lock);
897 if (!list_empty(&ci->i_dirty_item)) {
898 pr_info(" dropping dirty %s state for %p %lld\n",
899 ceph_cap_string(ci->i_dirty_caps),
900 inode, ceph_ino(inode));
901 ci->i_dirty_caps = 0;
902 list_del_init(&ci->i_dirty_item);
903 drop = 1;
904 }
905 if (!list_empty(&ci->i_flushing_item)) {
906 pr_info(" dropping dirty+flushing %s state for %p %lld\n",
907 ceph_cap_string(ci->i_flushing_caps),
908 inode, ceph_ino(inode));
909 ci->i_flushing_caps = 0;
910 list_del_init(&ci->i_flushing_item);
911 mdsc->num_cap_flushing--;
912 drop = 1;
913 }
914 if (drop && ci->i_wrbuffer_ref) {
915 pr_info(" dropping dirty data for %p %lld\n",
916 inode, ceph_ino(inode));
917 ci->i_wrbuffer_ref = 0;
918 ci->i_wrbuffer_ref_head = 0;
919 drop++;
920 }
921 spin_unlock(&mdsc->cap_dirty_lock);
922 }
923 spin_unlock(&inode->i_lock);
924 while (drop--)
925 iput(inode);
926 return 0;
927 }
928
929 /*
930 * caller must hold session s_mutex
931 */
932 static void remove_session_caps(struct ceph_mds_session *session)
933 {
934 dout("remove_session_caps on %p\n", session);
935 iterate_session_caps(session, remove_session_caps_cb, NULL);
936 BUG_ON(session->s_nr_caps > 0);
937 BUG_ON(!list_empty(&session->s_cap_flushing));
938 cleanup_cap_releases(session);
939 }
940
941 /*
942 * wake up any threads waiting on this session's caps. if the cap is
943 * old (didn't get renewed on the client reconnect), remove it now.
944 *
945 * caller must hold s_mutex.
946 */
947 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
948 void *arg)
949 {
950 struct ceph_inode_info *ci = ceph_inode(inode);
951
952 wake_up_all(&ci->i_cap_wq);
953 if (arg) {
954 spin_lock(&inode->i_lock);
955 ci->i_wanted_max_size = 0;
956 ci->i_requested_max_size = 0;
957 spin_unlock(&inode->i_lock);
958 }
959 return 0;
960 }
961
962 static void wake_up_session_caps(struct ceph_mds_session *session,
963 int reconnect)
964 {
965 dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
966 iterate_session_caps(session, wake_up_session_cb,
967 (void *)(unsigned long)reconnect);
968 }
969
970 /*
971 * Send periodic message to MDS renewing all currently held caps. The
972 * ack will reset the expiration for all caps from this session.
973 *
974 * caller holds s_mutex
975 */
976 static int send_renew_caps(struct ceph_mds_client *mdsc,
977 struct ceph_mds_session *session)
978 {
979 struct ceph_msg *msg;
980 int state;
981
982 if (time_after_eq(jiffies, session->s_cap_ttl) &&
983 time_after_eq(session->s_cap_ttl, session->s_renew_requested))
984 pr_info("mds%d caps stale\n", session->s_mds);
985 session->s_renew_requested = jiffies;
986
987 /* do not try to renew caps until a recovering mds has reconnected
988 * with its clients. */
989 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
990 if (state < CEPH_MDS_STATE_RECONNECT) {
991 dout("send_renew_caps ignoring mds%d (%s)\n",
992 session->s_mds, ceph_mds_state_name(state));
993 return 0;
994 }
995
996 dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
997 ceph_mds_state_name(state));
998 msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
999 ++session->s_renew_seq);
1000 if (!msg)
1001 return -ENOMEM;
1002 ceph_con_send(&session->s_con, msg);
1003 return 0;
1004 }
1005
1006 /*
1007 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1008 *
1009 * Called under session->s_mutex
1010 */
1011 static void renewed_caps(struct ceph_mds_client *mdsc,
1012 struct ceph_mds_session *session, int is_renew)
1013 {
1014 int was_stale;
1015 int wake = 0;
1016
1017 spin_lock(&session->s_cap_lock);
1018 was_stale = is_renew && (session->s_cap_ttl == 0 ||
1019 time_after_eq(jiffies, session->s_cap_ttl));
1020
1021 session->s_cap_ttl = session->s_renew_requested +
1022 mdsc->mdsmap->m_session_timeout*HZ;
1023
1024 if (was_stale) {
1025 if (time_before(jiffies, session->s_cap_ttl)) {
1026 pr_info("mds%d caps renewed\n", session->s_mds);
1027 wake = 1;
1028 } else {
1029 pr_info("mds%d caps still stale\n", session->s_mds);
1030 }
1031 }
1032 dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1033 session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1034 time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1035 spin_unlock(&session->s_cap_lock);
1036
1037 if (wake)
1038 wake_up_session_caps(session, 0);
1039 }
1040
1041 /*
1042 * send a session close request
1043 */
1044 static int request_close_session(struct ceph_mds_client *mdsc,
1045 struct ceph_mds_session *session)
1046 {
1047 struct ceph_msg *msg;
1048
1049 dout("request_close_session mds%d state %s seq %lld\n",
1050 session->s_mds, session_state_name(session->s_state),
1051 session->s_seq);
1052 msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1053 if (!msg)
1054 return -ENOMEM;
1055 ceph_con_send(&session->s_con, msg);
1056 return 0;
1057 }
1058
1059 /*
1060 * Called with s_mutex held.
1061 */
1062 static int __close_session(struct ceph_mds_client *mdsc,
1063 struct ceph_mds_session *session)
1064 {
1065 if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1066 return 0;
1067 session->s_state = CEPH_MDS_SESSION_CLOSING;
1068 return request_close_session(mdsc, session);
1069 }
1070
1071 /*
1072 * Trim old(er) caps.
1073 *
1074 * Because we can't cache an inode without one or more caps, we do
1075 * this indirectly: if a cap is unused, we prune its aliases, at which
1076 * point the inode will hopefully get dropped too.
1077 *
1078 * Yes, this is a bit sloppy. Our only real goal here is to respond to
1079 * memory pressure from the MDS, though, so it needn't be perfect.
1080 */
1081 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1082 {
1083 struct ceph_mds_session *session = arg;
1084 struct ceph_inode_info *ci = ceph_inode(inode);
1085 int used, oissued, mine;
1086
1087 if (session->s_trim_caps <= 0)
1088 return -1;
1089
1090 spin_lock(&inode->i_lock);
1091 mine = cap->issued | cap->implemented;
1092 used = __ceph_caps_used(ci);
1093 oissued = __ceph_caps_issued_other(ci, cap);
1094
1095 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
1096 inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1097 ceph_cap_string(used));
1098 if (ci->i_dirty_caps)
1099 goto out; /* dirty caps */
1100 if ((used & ~oissued) & mine)
1101 goto out; /* we need these caps */
1102
1103 session->s_trim_caps--;
1104 if (oissued) {
1105 /* we aren't the only cap.. just remove us */
1106 __ceph_remove_cap(cap);
1107 } else {
1108 /* try to drop referring dentries */
1109 spin_unlock(&inode->i_lock);
1110 d_prune_aliases(inode);
1111 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1112 inode, cap, atomic_read(&inode->i_count));
1113 return 0;
1114 }
1115
1116 out:
1117 spin_unlock(&inode->i_lock);
1118 return 0;
1119 }
1120
1121 /*
1122 * Trim session cap count down to some max number.
1123 */
1124 static int trim_caps(struct ceph_mds_client *mdsc,
1125 struct ceph_mds_session *session,
1126 int max_caps)
1127 {
1128 int trim_caps = session->s_nr_caps - max_caps;
1129
1130 dout("trim_caps mds%d start: %d / %d, trim %d\n",
1131 session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1132 if (trim_caps > 0) {
1133 session->s_trim_caps = trim_caps;
1134 iterate_session_caps(session, trim_caps_cb, session);
1135 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1136 session->s_mds, session->s_nr_caps, max_caps,
1137 trim_caps - session->s_trim_caps);
1138 session->s_trim_caps = 0;
1139 }
1140 return 0;
1141 }
1142
1143 /*
1144 * Allocate cap_release messages. If there is a partially full message
1145 * in the queue, try to allocate enough to cover its remainder, so that
1146 * we can send it immediately.
1147 *
1148 * Called under s_mutex.
1149 */
1150 int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
1151 struct ceph_mds_session *session)
1152 {
1153 struct ceph_msg *msg, *partial = NULL;
1154 struct ceph_mds_cap_release *head;
1155 int err = -ENOMEM;
1156 int extra = mdsc->fsc->mount_options->cap_release_safety;
1157 int num;
1158
1159 dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
1160 extra);
1161
1162 spin_lock(&session->s_cap_lock);
1163
1164 if (!list_empty(&session->s_cap_releases)) {
1165 msg = list_first_entry(&session->s_cap_releases,
1166 struct ceph_msg,
1167 list_head);
1168 head = msg->front.iov_base;
1169 num = le32_to_cpu(head->num);
1170 if (num) {
1171 dout(" partial %p with (%d/%d)\n", msg, num,
1172 (int)CEPH_CAPS_PER_RELEASE);
1173 extra += CEPH_CAPS_PER_RELEASE - num;
1174 partial = msg;
1175 }
1176 }
1177 while (session->s_num_cap_releases < session->s_nr_caps + extra) {
1178 spin_unlock(&session->s_cap_lock);
1179 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
1180 GFP_NOFS);
1181 if (!msg)
1182 goto out_unlocked;
1183 dout("add_cap_releases %p msg %p now %d\n", session, msg,
1184 (int)msg->front.iov_len);
1185 head = msg->front.iov_base;
1186 head->num = cpu_to_le32(0);
1187 msg->front.iov_len = sizeof(*head);
1188 spin_lock(&session->s_cap_lock);
1189 list_add(&msg->list_head, &session->s_cap_releases);
1190 session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
1191 }
1192
1193 if (partial) {
1194 head = partial->front.iov_base;
1195 num = le32_to_cpu(head->num);
1196 dout(" queueing partial %p with %d/%d\n", partial, num,
1197 (int)CEPH_CAPS_PER_RELEASE);
1198 list_move_tail(&partial->list_head,
1199 &session->s_cap_releases_done);
1200 session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
1201 }
1202 err = 0;
1203 spin_unlock(&session->s_cap_lock);
1204 out_unlocked:
1205 return err;
1206 }
1207
1208 /*
1209 * flush all dirty inode data to disk.
1210 *
1211 * returns true if we've flushed through want_flush_seq
1212 */
1213 static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
1214 {
1215 int mds, ret = 1;
1216
1217 dout("check_cap_flush want %lld\n", want_flush_seq);
1218 mutex_lock(&mdsc->mutex);
1219 for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
1220 struct ceph_mds_session *session = mdsc->sessions[mds];
1221
1222 if (!session)
1223 continue;
1224 get_session(session);
1225 mutex_unlock(&mdsc->mutex);
1226
1227 mutex_lock(&session->s_mutex);
1228 if (!list_empty(&session->s_cap_flushing)) {
1229 struct ceph_inode_info *ci =
1230 list_entry(session->s_cap_flushing.next,
1231 struct ceph_inode_info,
1232 i_flushing_item);
1233 struct inode *inode = &ci->vfs_inode;
1234
1235 spin_lock(&inode->i_lock);
1236 if (ci->i_cap_flush_seq <= want_flush_seq) {
1237 dout("check_cap_flush still flushing %p "
1238 "seq %lld <= %lld to mds%d\n", inode,
1239 ci->i_cap_flush_seq, want_flush_seq,
1240 session->s_mds);
1241 ret = 0;
1242 }
1243 spin_unlock(&inode->i_lock);
1244 }
1245 mutex_unlock(&session->s_mutex);
1246 ceph_put_mds_session(session);
1247
1248 if (!ret)
1249 return ret;
1250 mutex_lock(&mdsc->mutex);
1251 }
1252
1253 mutex_unlock(&mdsc->mutex);
1254 dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
1255 return ret;
1256 }
1257
1258 /*
1259 * called under s_mutex
1260 */
1261 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1262 struct ceph_mds_session *session)
1263 {
1264 struct ceph_msg *msg;
1265
1266 dout("send_cap_releases mds%d\n", session->s_mds);
1267 spin_lock(&session->s_cap_lock);
1268 while (!list_empty(&session->s_cap_releases_done)) {
1269 msg = list_first_entry(&session->s_cap_releases_done,
1270 struct ceph_msg, list_head);
1271 list_del_init(&msg->list_head);
1272 spin_unlock(&session->s_cap_lock);
1273 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1274 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1275 ceph_con_send(&session->s_con, msg);
1276 spin_lock(&session->s_cap_lock);
1277 }
1278 spin_unlock(&session->s_cap_lock);
1279 }
1280
1281 static void discard_cap_releases(struct ceph_mds_client *mdsc,
1282 struct ceph_mds_session *session)
1283 {
1284 struct ceph_msg *msg;
1285 struct ceph_mds_cap_release *head;
1286 unsigned num;
1287
1288 dout("discard_cap_releases mds%d\n", session->s_mds);
1289 spin_lock(&session->s_cap_lock);
1290
1291 /* zero out the in-progress message */
1292 msg = list_first_entry(&session->s_cap_releases,
1293 struct ceph_msg, list_head);
1294 head = msg->front.iov_base;
1295 num = le32_to_cpu(head->num);
1296 dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
1297 head->num = cpu_to_le32(0);
1298 session->s_num_cap_releases += num;
1299
1300 /* requeue completed messages */
1301 while (!list_empty(&session->s_cap_releases_done)) {
1302 msg = list_first_entry(&session->s_cap_releases_done,
1303 struct ceph_msg, list_head);
1304 list_del_init(&msg->list_head);
1305
1306 head = msg->front.iov_base;
1307 num = le32_to_cpu(head->num);
1308 dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
1309 num);
1310 session->s_num_cap_releases += num;
1311 head->num = cpu_to_le32(0);
1312 msg->front.iov_len = sizeof(*head);
1313 list_add(&msg->list_head, &session->s_cap_releases);
1314 }
1315
1316 spin_unlock(&session->s_cap_lock);
1317 }
1318
1319 /*
1320 * requests
1321 */
1322
1323 /*
1324 * Create an mds request.
1325 */
1326 struct ceph_mds_request *
1327 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1328 {
1329 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1330
1331 if (!req)
1332 return ERR_PTR(-ENOMEM);
1333
1334 mutex_init(&req->r_fill_mutex);
1335 req->r_mdsc = mdsc;
1336 req->r_started = jiffies;
1337 req->r_resend_mds = -1;
1338 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1339 req->r_fmode = -1;
1340 kref_init(&req->r_kref);
1341 INIT_LIST_HEAD(&req->r_wait);
1342 init_completion(&req->r_completion);
1343 init_completion(&req->r_safe_completion);
1344 INIT_LIST_HEAD(&req->r_unsafe_item);
1345
1346 req->r_op = op;
1347 req->r_direct_mode = mode;
1348 return req;
1349 }
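/*
 * Typical caller pattern elsewhere in fs/ceph (illustrative sketch only;
 * which r_* fields get filled in depends on the operation):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */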
1350
1351 /*
1352 * return the oldest (lowest tid) request in the request tree; NULL/tid 0 if none.
1353 *
1354 * called under mdsc->mutex.
1355 */
1356 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1357 {
1358 if (RB_EMPTY_ROOT(&mdsc->request_tree))
1359 return NULL;
1360 return rb_entry(rb_first(&mdsc->request_tree),
1361 struct ceph_mds_request, r_node);
1362 }
1363
1364 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1365 {
1366 struct ceph_mds_request *req = __get_oldest_req(mdsc);
1367
1368 if (req)
1369 return req->r_tid;
1370 return 0;
1371 }
1372
1373 /*
1374 * Build a dentry's path. Allocate on heap; caller must kfree. Based
1375 * on build_path_from_dentry in fs/cifs/dir.c.
1376 *
1377 * If @stop_on_nosnap, generate path relative to the first non-snapped
1378 * inode.
1379 *
1380 * Encode hidden .snap dirs as a double /, i.e.
1381 * foo/.snap/bar -> foo//bar
1382 */
1383 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1384 int stop_on_nosnap)
1385 {
1386 struct dentry *temp;
1387 char *path;
1388 int len, pos;
1389
1390 if (dentry == NULL)
1391 return ERR_PTR(-EINVAL);
1392
1393 retry:
1394 len = 0;
1395 for (temp = dentry; !IS_ROOT(temp);) {
1396 struct inode *inode = temp->d_inode;
1397 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1398 len++; /* slash only */
1399 else if (stop_on_nosnap && inode &&
1400 ceph_snap(inode) == CEPH_NOSNAP)
1401 break;
1402 else
1403 len += 1 + temp->d_name.len;
1404 temp = temp->d_parent;
1405 if (temp == NULL) {
1406 pr_err("build_path corrupt dentry %p\n", dentry);
1407 return ERR_PTR(-EINVAL);
1408 }
1409 }
1410 if (len)
1411 len--; /* no leading '/' */
1412
1413 path = kmalloc(len+1, GFP_NOFS);
1414 if (path == NULL)
1415 return ERR_PTR(-ENOMEM);
1416 pos = len;
1417 path[pos] = 0; /* trailing null */
1418 for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1419 struct inode *inode = temp->d_inode;
1420
1421 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1422 dout("build_path path+%d: %p SNAPDIR\n",
1423 pos, temp);
1424 } else if (stop_on_nosnap && inode &&
1425 ceph_snap(inode) == CEPH_NOSNAP) {
1426 break;
1427 } else {
1428 pos -= temp->d_name.len;
1429 if (pos < 0)
1430 break;
1431 strncpy(path + pos, temp->d_name.name,
1432 temp->d_name.len);
1433 }
1434 if (pos)
1435 path[--pos] = '/';
1436 temp = temp->d_parent;
1437 if (temp == NULL) {
1438 pr_err("build_path corrupt dentry\n");
1439 kfree(path);
1440 return ERR_PTR(-EINVAL);
1441 }
1442 }
1443 if (pos != 0) {
1444 pr_err("build_path did not end path lookup where "
1445 "expected, namelen is %d, pos is %d\n", len, pos);
1446 /* presumably this is only possible if racing with a
1447 rename of one of the parent directories (we can not
1448 lock the dentries above us to prevent this, but
1449 retrying should be harmless) */
1450 kfree(path);
1451 goto retry;
1452 }
1453
1454 *base = ceph_ino(temp->d_inode);
1455 *plen = len;
1456 dout("build_path on %p %d built %llx '%.*s'\n",
1457 dentry, atomic_read(&dentry->d_count), *base, len, path);
1458 return path;
1459 }
1460
1461 static int build_dentry_path(struct dentry *dentry,
1462 const char **ppath, int *ppathlen, u64 *pino,
1463 int *pfreepath)
1464 {
1465 char *path;
1466
1467 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
1468 *pino = ceph_ino(dentry->d_parent->d_inode);
1469 *ppath = dentry->d_name.name;
1470 *ppathlen = dentry->d_name.len;
1471 return 0;
1472 }
1473 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1474 if (IS_ERR(path))
1475 return PTR_ERR(path);
1476 *ppath = path;
1477 *pfreepath = 1;
1478 return 0;
1479 }
1480
1481 static int build_inode_path(struct inode *inode,
1482 const char **ppath, int *ppathlen, u64 *pino,
1483 int *pfreepath)
1484 {
1485 struct dentry *dentry;
1486 char *path;
1487
1488 if (ceph_snap(inode) == CEPH_NOSNAP) {
1489 *pino = ceph_ino(inode);
1490 *ppathlen = 0;
1491 return 0;
1492 }
1493 dentry = d_find_alias(inode);
1494 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1495 dput(dentry);
1496 if (IS_ERR(path))
1497 return PTR_ERR(path);
1498 *ppath = path;
1499 *pfreepath = 1;
1500 return 0;
1501 }
1502
1503 /*
1504 * request arguments may be specified via an inode *, a dentry *, or
1505 * an explicit ino+path.
1506 */
1507 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1508 const char *rpath, u64 rino,
1509 const char **ppath, int *pathlen,
1510 u64 *ino, int *freepath)
1511 {
1512 int r = 0;
1513
1514 if (rinode) {
1515 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1516 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1517 ceph_snap(rinode));
1518 } else if (rdentry) {
1519 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1520 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1521 *ppath);
1522 } else if (rpath) {
1523 *ino = rino;
1524 *ppath = rpath;
1525 *pathlen = strlen(rpath);
1526 dout(" path %.*s\n", *pathlen, rpath);
1527 }
1528
1529 return r;
1530 }
1531
1532 /*
1533 * called under mdsc->mutex
1534 */
1535 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1536 struct ceph_mds_request *req,
1537 int mds)
1538 {
1539 struct ceph_msg *msg;
1540 struct ceph_mds_request_head *head;
1541 const char *path1 = NULL;
1542 const char *path2 = NULL;
1543 u64 ino1 = 0, ino2 = 0;
1544 int pathlen1 = 0, pathlen2 = 0;
1545 int freepath1 = 0, freepath2 = 0;
1546 int len;
1547 u16 releases;
1548 void *p, *end;
1549 int ret;
1550
1551 ret = set_request_path_attr(req->r_inode, req->r_dentry,
1552 req->r_path1, req->r_ino1.ino,
1553 &path1, &pathlen1, &ino1, &freepath1);
1554 if (ret < 0) {
1555 msg = ERR_PTR(ret);
1556 goto out;
1557 }
1558
1559 ret = set_request_path_attr(NULL, req->r_old_dentry,
1560 req->r_path2, req->r_ino2.ino,
1561 &path2, &pathlen2, &ino2, &freepath2);
1562 if (ret < 0) {
1563 msg = ERR_PTR(ret);
1564 goto out_free1;
1565 }
1566
1567 len = sizeof(*head) +
1568 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64));
1569
1570 /* calculate (max) length for cap releases */
1571 len += sizeof(struct ceph_mds_request_release) *
1572 (!!req->r_inode_drop + !!req->r_dentry_drop +
1573 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1574 if (req->r_dentry_drop)
1575 len += req->r_dentry->d_name.len;
1576 if (req->r_old_dentry_drop)
1577 len += req->r_old_dentry->d_name.len;
1578
1579 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS);
1580 if (!msg) {
1581 msg = ERR_PTR(-ENOMEM);
1582 goto out_free2;
1583 }
1584
1585 msg->hdr.tid = cpu_to_le64(req->r_tid);
1586
1587 head = msg->front.iov_base;
1588 p = msg->front.iov_base + sizeof(*head);
1589 end = msg->front.iov_base + msg->front.iov_len;
1590
1591 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1592 head->op = cpu_to_le32(req->r_op);
1593 head->caller_uid = cpu_to_le32(req->r_uid);
1594 head->caller_gid = cpu_to_le32(req->r_gid);
1595 head->args = req->r_args;
1596
1597 ceph_encode_filepath(&p, end, ino1, path1);
1598 ceph_encode_filepath(&p, end, ino2, path2);
1599
1600 /* make note of release offset, in case we need to replay */
1601 req->r_request_release_offset = p - msg->front.iov_base;
1602
1603 /* cap releases */
1604 releases = 0;
1605 if (req->r_inode_drop)
1606 releases += ceph_encode_inode_release(&p,
1607 req->r_inode ? req->r_inode : req->r_dentry->d_inode,
1608 mds, req->r_inode_drop, req->r_inode_unless, 0);
1609 if (req->r_dentry_drop)
1610 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1611 mds, req->r_dentry_drop, req->r_dentry_unless);
1612 if (req->r_old_dentry_drop)
1613 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1614 mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1615 if (req->r_old_inode_drop)
1616 releases += ceph_encode_inode_release(&p,
1617 req->r_old_dentry->d_inode,
1618 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1619 head->num_releases = cpu_to_le16(releases);
1620
1621 BUG_ON(p > end);
1622 msg->front.iov_len = p - msg->front.iov_base;
1623 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1624
1625 msg->pages = req->r_pages;
1626 msg->nr_pages = req->r_num_pages;
1627 msg->hdr.data_len = cpu_to_le32(req->r_data_len);
1628 msg->hdr.data_off = cpu_to_le16(0);
1629
1630 out_free2:
1631 if (freepath2)
1632 kfree((char *)path2);
1633 out_free1:
1634 if (freepath1)
1635 kfree((char *)path1);
1636 out:
1637 return msg;
1638 }
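/*
 * Rough layout of the request message assembled above:
 *   struct ceph_mds_request_head
 *   filepath 1  (ino + path, ceph_encode_filepath)
 *   filepath 2  (ino + path)
 *   zero or more cap release entries
 * r_request_release_offset remembers where the releases begin so that a
 * replay can truncate them off again (see __prepare_send_request()).
 */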
1639
1640 /*
1641 * called under mdsc->mutex if error, under no mutex if
1642 * success.
1643 */
1644 static void complete_request(struct ceph_mds_client *mdsc,
1645 struct ceph_mds_request *req)
1646 {
1647 if (req->r_callback)
1648 req->r_callback(mdsc, req);
1649 else
1650 complete_all(&req->r_completion);
1651 }
1652
1653 /*
1654 * called under mdsc->mutex
1655 */
1656 static int __prepare_send_request(struct ceph_mds_client *mdsc,
1657 struct ceph_mds_request *req,
1658 int mds)
1659 {
1660 struct ceph_mds_request_head *rhead;
1661 struct ceph_msg *msg;
1662 int flags = 0;
1663
1664 req->r_mds = mds;
1665 req->r_attempts++;
1666 if (req->r_inode) {
1667 struct ceph_cap *cap =
1668 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
1669
1670 if (cap)
1671 req->r_sent_on_mseq = cap->mseq;
1672 else
1673 req->r_sent_on_mseq = -1;
1674 }
1675 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
1676 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
1677
1678 if (req->r_got_unsafe) {
1679 /*
1680 * Replay. Do not regenerate message (and rebuild
1681 * paths, etc.); just use the original message.
1682 * Rebuilding paths will break for renames because
1683 * d_move mangles the src name.
1684 */
1685 msg = req->r_request;
1686 rhead = msg->front.iov_base;
1687
1688 flags = le32_to_cpu(rhead->flags);
1689 flags |= CEPH_MDS_FLAG_REPLAY;
1690 rhead->flags = cpu_to_le32(flags);
1691
1692 if (req->r_target_inode)
1693 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
1694
1695 rhead->num_retry = req->r_attempts - 1;
1696
1697 /* remove cap/dentry releases from message */
1698 rhead->num_releases = 0;
1699 msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
1700 msg->front.iov_len = req->r_request_release_offset;
1701 return 0;
1702 }
1703
1704 if (req->r_request) {
1705 ceph_msg_put(req->r_request);
1706 req->r_request = NULL;
1707 }
1708 msg = create_request_message(mdsc, req, mds);
1709 if (IS_ERR(msg)) {
1710 req->r_err = PTR_ERR(msg);
1711 complete_request(mdsc, req);
1712 return PTR_ERR(msg);
1713 }
1714 req->r_request = msg;
1715
1716 rhead = msg->front.iov_base;
1717 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
1718 if (req->r_got_unsafe)
1719 flags |= CEPH_MDS_FLAG_REPLAY;
1720 if (req->r_locked_dir)
1721 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
1722 rhead->flags = cpu_to_le32(flags);
1723 rhead->num_fwd = req->r_num_fwd;
1724 rhead->num_retry = req->r_attempts - 1;
1725 rhead->ino = 0;
1726
1727 dout(" r_locked_dir = %p\n", req->r_locked_dir);
1728 return 0;
1729 }
1730
1731 /*
1732 * send request, or put it on the appropriate wait list.
1733 */
1734 static int __do_request(struct ceph_mds_client *mdsc,
1735 struct ceph_mds_request *req)
1736 {
1737 struct ceph_mds_session *session = NULL;
1738 int mds = -1;
1739 int err = -EAGAIN;
1740
1741 if (req->r_err || req->r_got_result)
1742 goto out;
1743
1744 if (req->r_timeout &&
1745 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
1746 dout("do_request timed out\n");
1747 err = -EIO;
1748 goto finish;
1749 }
1750
1751 mds = __choose_mds(mdsc, req);
1752 if (mds < 0 ||
1753 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
1754 dout("do_request no mds or not active, waiting for map\n");
1755 list_add(&req->r_wait, &mdsc->waiting_for_map);
1756 goto out;
1757 }
1758
1759 /* get, open session */
1760 session = __ceph_lookup_mds_session(mdsc, mds);
1761 if (!session) {
1762 session = register_session(mdsc, mds);
1763 if (IS_ERR(session)) {
1764 err = PTR_ERR(session);
1765 goto finish;
1766 }
1767 }
1768 dout("do_request mds%d session %p state %s\n", mds, session,
1769 session_state_name(session->s_state));
1770 if (session->s_state != CEPH_MDS_SESSION_OPEN &&
1771 session->s_state != CEPH_MDS_SESSION_HUNG) {
1772 if (session->s_state == CEPH_MDS_SESSION_NEW ||
1773 session->s_state == CEPH_MDS_SESSION_CLOSING)
1774 __open_session(mdsc, session);
1775 list_add(&req->r_wait, &session->s_waiting);
1776 goto out_session;
1777 }
1778
1779 /* send request */
1780 req->r_session = get_session(session);
1781 req->r_resend_mds = -1; /* forget any previous mds hint */
1782
1783 if (req->r_request_started == 0) /* note request start time */
1784 req->r_request_started = jiffies;
1785
1786 err = __prepare_send_request(mdsc, req, mds);
1787 if (!err) {
1788 ceph_msg_get(req->r_request);
1789 ceph_con_send(&session->s_con, req->r_request);
1790 }
1791
1792 out_session:
1793 ceph_put_mds_session(session);
1794 out:
1795 return err;
1796
1797 finish:
1798 req->r_err = err;
1799 complete_request(mdsc, req);
1800 goto out;
1801 }
1802
1803 /*
1804 * called under mdsc->mutex
1805 */
1806 static void __wake_requests(struct ceph_mds_client *mdsc,
1807 struct list_head *head)
1808 {
1809 struct ceph_mds_request *req, *nreq;
1810
1811 list_for_each_entry_safe(req, nreq, head, r_wait) {
1812 list_del_init(&req->r_wait);
1813 __do_request(mdsc, req);
1814 }
1815 }
1816
1817 /*
1818 * Wake up threads with requests pending for @mds, so that they can
1819 * resubmit their requests to a possibly different mds.
1820 */
1821 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
1822 {
1823 struct ceph_mds_request *req;
1824 struct rb_node *p;
1825
1826 dout("kick_requests mds%d\n", mds);
1827 for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
1828 req = rb_entry(p, struct ceph_mds_request, r_node);
1829 if (req->r_got_unsafe)
1830 continue;
1831 if (req->r_session &&
1832 req->r_session->s_mds == mds) {
1833 dout(" kicking tid %llu\n", req->r_tid);
1834 put_request_session(req);
1835 __do_request(mdsc, req);
1836 }
1837 }
1838 }
1839
1840 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
1841 struct ceph_mds_request *req)
1842 {
1843 dout("submit_request on %p\n", req);
1844 mutex_lock(&mdsc->mutex);
1845 __register_request(mdsc, req, NULL);
1846 __do_request(mdsc, req);
1847 mutex_unlock(&mdsc->mutex);
1848 }
1849
1850 /*
1851 * Synchronously perform an mds request. Take care of all of the
1852 * session setup, forwarding, retry details.
1853 */
1854 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
1855 struct inode *dir,
1856 struct ceph_mds_request *req)
1857 {
1858 int err;
1859
1860 dout("do_request on %p\n", req);
1861
1862 /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
1863 if (req->r_inode)
1864 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
1865 if (req->r_locked_dir)
1866 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
1867 if (req->r_old_dentry)
1868 ceph_get_cap_refs(
1869 ceph_inode(req->r_old_dentry->d_parent->d_inode),
1870 CEPH_CAP_PIN);
1871
1872 /* issue */
1873 mutex_lock(&mdsc->mutex);
1874 __register_request(mdsc, req, dir);
1875 __do_request(mdsc, req);
1876
1877 if (req->r_err) {
1878 err = req->r_err;
1879 __unregister_request(mdsc, req);
1880 dout("do_request early error %d\n", err);
1881 goto out;
1882 }
1883
1884 /* wait */
1885 mutex_unlock(&mdsc->mutex);
1886 dout("do_request waiting\n");
1887 if (req->r_timeout) {
1888 err = (long)wait_for_completion_killable_timeout(
1889 &req->r_completion, req->r_timeout);
1890 if (err == 0)
1891 err = -EIO;
1892 } else {
1893 err = wait_for_completion_killable(&req->r_completion);
1894 }
1895 dout("do_request waited, got %d\n", err);
1896 mutex_lock(&mdsc->mutex);
1897
1898 /* only abort if we didn't race with a real reply */
1899 if (req->r_got_result) {
1900 err = le32_to_cpu(req->r_reply_info.head->result);
1901 } else if (err < 0) {
1902 dout("aborted request %lld with %d\n", req->r_tid, err);
1903
1904 /*
1905 * ensure we aren't running concurrently with
1906 * ceph_fill_trace or ceph_readdir_prepopulate, which
1907 * rely on locks (dir mutex) held by our caller.
1908 */
1909 mutex_lock(&req->r_fill_mutex);
1910 req->r_err = err;
1911 req->r_aborted = true;
1912 mutex_unlock(&req->r_fill_mutex);
1913
1914 if (req->r_locked_dir &&
1915 (req->r_op & CEPH_MDS_OP_WRITE))
1916 ceph_invalidate_dir_request(req);
1917 } else {
1918 err = req->r_err;
1919 }
1920
1921 out:
1922 mutex_unlock(&mdsc->mutex);
1923 dout("do_request %p done, result %d\n", req, err);
1924 return err;
1925 }
1926
1927 /*
1928 * Invalidate dir I_COMPLETE, dentry lease state on an aborted MDS
1929 * namespace request.
1930 */
1931 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
1932 {
1933 struct inode *inode = req->r_locked_dir;
1934 struct ceph_inode_info *ci = ceph_inode(inode);
1935
1936 dout("invalidate_dir_request %p (I_COMPLETE, lease(s))\n", inode);
1937 spin_lock(&inode->i_lock);
1938 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
1939 ci->i_release_count++;
1940 spin_unlock(&inode->i_lock);
1941
1942 if (req->r_dentry)
1943 ceph_invalidate_dentry_lease(req->r_dentry);
1944 if (req->r_old_dentry)
1945 ceph_invalidate_dentry_lease(req->r_old_dentry);
1946 }
1947
1948 /*
1949 * Handle mds reply.
1950 *
1951 * We take the session mutex and parse and process the reply immediately.
1952 * This preserves the logical ordering of replies, capabilities, etc., sent
1953 * by the MDS as they are applied to our local cache.
1954 */
1955 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
1956 {
1957 struct ceph_mds_client *mdsc = session->s_mdsc;
1958 struct ceph_mds_request *req;
1959 struct ceph_mds_reply_head *head = msg->front.iov_base;
1960 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
1961 u64 tid;
1962 int err, result;
1963 int mds = session->s_mds;
1964
1965 if (msg->front.iov_len < sizeof(*head)) {
1966 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
1967 ceph_msg_dump(msg);
1968 return;
1969 }
1970
1971 /* get request, session */
1972 tid = le64_to_cpu(msg->hdr.tid);
1973 mutex_lock(&mdsc->mutex);
1974 req = __lookup_request(mdsc, tid);
1975 if (!req) {
1976 dout("handle_reply on unknown tid %llu\n", tid);
1977 mutex_unlock(&mdsc->mutex);
1978 return;
1979 }
1980 dout("handle_reply %p\n", req);
1981
1982 /* correct session? */
1983 if (req->r_session != session) {
1984 pr_err("mdsc_handle_reply got %llu on session mds%d"
1985 " not mds%d\n", tid, session->s_mds,
1986 req->r_session ? req->r_session->s_mds : -1);
1987 mutex_unlock(&mdsc->mutex);
1988 goto out;
1989 }
1990
1991 /* dup? */
1992 if ((req->r_got_unsafe && !head->safe) ||
1993 (req->r_got_safe && head->safe)) {
1994 pr_warning("got a dup %s reply on %llu from mds%d\n",
1995 head->safe ? "safe" : "unsafe", tid, mds);
1996 mutex_unlock(&mdsc->mutex);
1997 goto out;
1998 }
1999 if (req->r_got_safe && !head->safe) {
2000 pr_warning("got unsafe after safe on %llu from mds%d\n",
2001 tid, mds);
2002 mutex_unlock(&mdsc->mutex);
2003 goto out;
2004 }
2005
2006 result = le32_to_cpu(head->result);
2007
2008 /*
2009 * Handle an ESTALE:
2010 * if we're not talking to the authority, send to them;
2011 * if the authority has changed while we weren't looking,
2012 * send to the new authority.
2013 * Otherwise we just have to return an ESTALE.
2014 */
2015 if (result == -ESTALE) {
2016 dout("got ESTALE on request %llu\n", req->r_tid);
2017 if (!req->r_inode) {
2018 /* do nothing; not an authority problem */
2019 } else if (req->r_direct_mode != USE_AUTH_MDS) {
2020 dout("not using auth, setting for that now\n");
2021 req->r_direct_mode = USE_AUTH_MDS;
2022 __do_request(mdsc, req);
2023 mutex_unlock(&mdsc->mutex);
2024 goto out;
2025 } else {
2026 struct ceph_inode_info *ci = ceph_inode(req->r_inode);
2027 struct ceph_cap *cap =
2028 ceph_get_cap_for_mds(ci, req->r_mds);
2029
2030 dout("already using auth\n");
2031 if ((!cap || cap != ci->i_auth_cap) ||
2032 (cap->mseq != req->r_sent_on_mseq)) {
2033 dout("but cap changed, so resending\n");
2034 __do_request(mdsc, req);
2035 mutex_unlock(&mdsc->mutex);
2036 goto out;
2037 }
2038 }
2039 dout("have to return ESTALE on request %llu\n", req->r_tid);
2040 }
2041
2042
2043 if (head->safe) {
2044 req->r_got_safe = true;
2045 __unregister_request(mdsc, req);
2046 complete_all(&req->r_safe_completion);
2047
2048 if (req->r_got_unsafe) {
2049 /*
2050 * We already handled the unsafe response, now do the
2051 * cleanup. No need to examine the response; the MDS
2052 * doesn't include any result info in the safe
2053 * response. And even if it did, there is nothing
2054 * useful we could do with a revised return value.
2055 */
2056 dout("got safe reply %llu, mds%d\n", tid, mds);
2057 list_del_init(&req->r_unsafe_item);
2058
2059 /* last unsafe request during umount? */
2060 if (mdsc->stopping && !__get_oldest_req(mdsc))
2061 complete_all(&mdsc->safe_umount_waiters);
2062 mutex_unlock(&mdsc->mutex);
2063 goto out;
2064 }
2065 } else {
2066 req->r_got_unsafe = true;
2067 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2068 }
2069
2070 dout("handle_reply tid %lld result %d\n", tid, result);
2071 rinfo = &req->r_reply_info;
2072 err = parse_reply_info(msg, rinfo);
2073 mutex_unlock(&mdsc->mutex);
2074
2075 mutex_lock(&session->s_mutex);
2076 if (err < 0) {
2077 pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
2078 ceph_msg_dump(msg);
2079 goto out_err;
2080 }
2081
2082 /* snap trace */
2083 if (rinfo->snapblob_len) {
2084 down_write(&mdsc->snap_rwsem);
2085 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2086 rinfo->snapblob + rinfo->snapblob_len,
2087 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
2088 downgrade_write(&mdsc->snap_rwsem);
2089 } else {
2090 down_read(&mdsc->snap_rwsem);
2091 }
2092
2093 /* insert trace into our cache */
2094 mutex_lock(&req->r_fill_mutex);
2095 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2096 if (err == 0) {
2097 if (result == 0 && rinfo->dir_nr)
2098 ceph_readdir_prepopulate(req, req->r_session);
2099 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2100 }
2101 mutex_unlock(&req->r_fill_mutex);
2102
2103 up_read(&mdsc->snap_rwsem);
2104 out_err:
2105 mutex_lock(&mdsc->mutex);
2106 if (!req->r_aborted) {
2107 if (err) {
2108 req->r_err = err;
2109 } else {
2110 req->r_reply = msg;
2111 ceph_msg_get(msg);
2112 req->r_got_result = true;
2113 }
2114 } else {
2115 dout("reply arrived after request %lld was aborted\n", tid);
2116 }
2117 mutex_unlock(&mdsc->mutex);
2118
2119 ceph_add_cap_releases(mdsc, req->r_session);
2120 mutex_unlock(&session->s_mutex);
2121
2122 /* kick calling process */
2123 complete_request(mdsc, req);
2124 out:
2125 ceph_mdsc_put_request(req);
2126 return;
2127 }
2128
2129
2130
2131 /*
2132 * handle mds notification that our request has been forwarded.
2133 */
2134 static void handle_forward(struct ceph_mds_client *mdsc,
2135 struct ceph_mds_session *session,
2136 struct ceph_msg *msg)
2137 {
2138 struct ceph_mds_request *req;
2139 u64 tid = le64_to_cpu(msg->hdr.tid);
2140 u32 next_mds;
2141 u32 fwd_seq;
2142 int err = -EINVAL;
2143 void *p = msg->front.iov_base;
2144 void *end = p + msg->front.iov_len;
2145
2146 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2147 next_mds = ceph_decode_32(&p);
2148 fwd_seq = ceph_decode_32(&p);
2149
2150 mutex_lock(&mdsc->mutex);
2151 req = __lookup_request(mdsc, tid);
2152 if (!req) {
2153 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2154 goto out; /* dup reply? */
2155 }
2156
2157 if (req->r_aborted) {
2158 dout("forward tid %llu aborted, unregistering\n", tid);
2159 __unregister_request(mdsc, req);
2160 } else if (fwd_seq <= req->r_num_fwd) {
2161 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2162 tid, next_mds, req->r_num_fwd, fwd_seq);
2163 } else {
2164 /* resend. forward race not possible; mds would drop */
2165 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2166 BUG_ON(req->r_err);
2167 BUG_ON(req->r_got_result);
2168 req->r_num_fwd = fwd_seq;
2169 req->r_resend_mds = next_mds;
2170 put_request_session(req);
2171 __do_request(mdsc, req);
2172 }
2173 ceph_mdsc_put_request(req);
2174 out:
2175 mutex_unlock(&mdsc->mutex);
2176 return;
2177
2178 bad:
2179 pr_err("mdsc_handle_forward decode error err=%d\n", err);
2180 }
2181
2182 /*
2183 * handle an MDS session control message
2184 */
2185 static void handle_session(struct ceph_mds_session *session,
2186 struct ceph_msg *msg)
2187 {
2188 struct ceph_mds_client *mdsc = session->s_mdsc;
2189 u32 op;
2190 u64 seq;
2191 int mds = session->s_mds;
2192 struct ceph_mds_session_head *h = msg->front.iov_base;
2193 int wake = 0;
2194
2195 /* decode */
2196 if (msg->front.iov_len != sizeof(*h))
2197 goto bad;
2198 op = le32_to_cpu(h->op);
2199 seq = le64_to_cpu(h->seq);
2200
2201 mutex_lock(&mdsc->mutex);
2202 if (op == CEPH_SESSION_CLOSE)
2203 __unregister_session(mdsc, session);
2204 /* FIXME: this ttl calculation is generous */
2205 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2206 mutex_unlock(&mdsc->mutex);
2207
2208 mutex_lock(&session->s_mutex);
2209
2210 dout("handle_session mds%d %s %p state %s seq %llu\n",
2211 mds, ceph_session_op_name(op), session,
2212 session_state_name(session->s_state), seq);
2213
2214 if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2215 session->s_state = CEPH_MDS_SESSION_OPEN;
2216 pr_info("mds%d came back\n", session->s_mds);
2217 }
2218
2219 switch (op) {
2220 case CEPH_SESSION_OPEN:
2221 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2222 pr_info("mds%d reconnect success\n", session->s_mds);
2223 session->s_state = CEPH_MDS_SESSION_OPEN;
2224 renewed_caps(mdsc, session, 0);
2225 wake = 1;
2226 if (mdsc->stopping)
2227 __close_session(mdsc, session);
2228 break;
2229
2230 case CEPH_SESSION_RENEWCAPS:
2231 if (session->s_renew_seq == seq)
2232 renewed_caps(mdsc, session, 1);
2233 break;
2234
2235 case CEPH_SESSION_CLOSE:
2236 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2237 pr_info("mds%d reconnect denied\n", session->s_mds);
2238 remove_session_caps(session);
2239 wake = 1; /* for good measure */
2240 wake_up_all(&mdsc->session_close_wq);
2241 kick_requests(mdsc, mds);
2242 break;
2243
2244 case CEPH_SESSION_STALE:
2245 pr_info("mds%d caps went stale, renewing\n",
2246 session->s_mds);
2247 spin_lock(&session->s_cap_lock);
2248 session->s_cap_gen++;
2249 session->s_cap_ttl = 0;
2250 spin_unlock(&session->s_cap_lock);
2251 send_renew_caps(mdsc, session);
2252 break;
2253
2254 case CEPH_SESSION_RECALL_STATE:
2255 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2256 break;
2257
2258 default:
2259 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2260 WARN_ON(1);
2261 }
2262
2263 mutex_unlock(&session->s_mutex);
2264 if (wake) {
2265 mutex_lock(&mdsc->mutex);
2266 __wake_requests(mdsc, &session->s_waiting);
2267 mutex_unlock(&mdsc->mutex);
2268 }
2269 return;
2270
2271 bad:
2272 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2273 (int)msg->front.iov_len);
2274 ceph_msg_dump(msg);
2275 return;
2276 }
2277
2278
2279 /*
2280 * called under session->s_mutex.
2281 */
2282 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2283 struct ceph_mds_session *session)
2284 {
2285 struct ceph_mds_request *req, *nreq;
2286 int err;
2287
2288 dout("replay_unsafe_requests mds%d\n", session->s_mds);
2289
2290 mutex_lock(&mdsc->mutex);
2291 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2292 err = __prepare_send_request(mdsc, req, session->s_mds);
2293 if (!err) {
2294 ceph_msg_get(req->r_request);
2295 ceph_con_send(&session->s_con, req->r_request);
2296 }
2297 }
2298 mutex_unlock(&mdsc->mutex);
2299 }
2300
2301 /*
2302 * Encode information about a cap for a reconnect with the MDS.
2303 */
2304 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2305 void *arg)
2306 {
2307 union {
2308 struct ceph_mds_cap_reconnect v2;
2309 struct ceph_mds_cap_reconnect_v1 v1;
2310 } rec;
2311 size_t reclen;
2312 struct ceph_inode_info *ci;
2313 struct ceph_reconnect_state *recon_state = arg;
2314 struct ceph_pagelist *pagelist = recon_state->pagelist;
2315 char *path;
2316 int pathlen, err;
2317 u64 pathbase;
2318 struct dentry *dentry;
2319
2320 ci = cap->ci;
2321
2322 dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2323 inode, ceph_vinop(inode), cap, cap->cap_id,
2324 ceph_cap_string(cap->issued));
2325 err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2326 if (err)
2327 return err;
2328
2329 dentry = d_find_alias(inode);
2330 if (dentry) {
2331 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2332 if (IS_ERR(path)) {
2333 err = PTR_ERR(path);
2334 goto out_dput;
2335 }
2336 } else {
2337 path = NULL;
2338 pathlen = 0;
2339 }
2340 err = ceph_pagelist_encode_string(pagelist, path, pathlen);
2341 if (err)
2342 goto out_free;
2343
2344 spin_lock(&inode->i_lock);
2345 cap->seq = 0; /* reset cap seq */
2346 cap->issue_seq = 0; /* and issue_seq */
2347
2348 if (recon_state->flock) {
2349 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2350 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2351 rec.v2.issued = cpu_to_le32(cap->issued);
2352 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2353 rec.v2.pathbase = cpu_to_le64(pathbase);
2354 rec.v2.flock_len = 0;
2355 reclen = sizeof(rec.v2);
2356 } else {
2357 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2358 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2359 rec.v1.issued = cpu_to_le32(cap->issued);
2360 rec.v1.size = cpu_to_le64(inode->i_size);
2361 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2362 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2363 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2364 rec.v1.pathbase = cpu_to_le64(pathbase);
2365 reclen = sizeof(rec.v1);
2366 }
2367 spin_unlock(&inode->i_lock);
2368
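/*
 * When the peer supports the flock feature, also encode any fcntl/flock
 * locks held on this inode.  The lock count can change between sizing
 * and encoding (lock_flocks() is dropped in between), so if
 * ceph_encode_locks() overflows the reserved space (-ENOSPC) we truncate
 * the pagelist back to the saved cursor and retry with a fresh count.
 */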
2369 if (recon_state->flock) {
2370 int num_fcntl_locks, num_flock_locks;
2371 struct ceph_pagelist_cursor trunc_point;
2372
2373 ceph_pagelist_set_cursor(pagelist, &trunc_point);
2374 do {
2375 lock_flocks();
2376 ceph_count_locks(inode, &num_fcntl_locks,
2377 &num_flock_locks);
2378 rec.v2.flock_len = (2*sizeof(u32) +
2379 (num_fcntl_locks+num_flock_locks) *
2380 sizeof(struct ceph_filelock));
2381 unlock_flocks();
2382
2383 /* pre-alloc pagelist */
2384 ceph_pagelist_truncate(pagelist, &trunc_point);
2385 err = ceph_pagelist_append(pagelist, &rec, reclen);
2386 if (!err)
2387 err = ceph_pagelist_reserve(pagelist,
2388 rec.v2.flock_len);
2389
2390 /* encode locks */
2391 if (!err) {
2392 lock_flocks();
2393 err = ceph_encode_locks(inode,
2394 pagelist,
2395 num_fcntl_locks,
2396 num_flock_locks);
2397 unlock_flocks();
2398 }
2399 } while (err == -ENOSPC);
2400 } else {
2401 err = ceph_pagelist_append(pagelist, &rec, reclen);
2402 }
2403
2404 out_free:
2405 kfree(path);
2406 out_dput:
2407 dput(dentry);
2408 return err;
2409 }
2410
2411
2412 /*
2413 * If an MDS fails and recovers, clients need to reconnect in order to
2414 * reestablish shared state. This includes all caps issued through
2415 * this session _and_ the snap_realm hierarchy. Because it's not
2416 * clear which snap realms the mds cares about, we send everything we
2417 * know about; that ensures we'll then get any new info the
2418 * recovering MDS might have.
2419 *
2420 * This is a relatively heavyweight operation, but it's rare.
2421 *
2422 * called with mdsc->mutex held.
2423 */
2424 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2425 struct ceph_mds_session *session)
2426 {
2427 struct ceph_msg *reply;
2428 struct rb_node *p;
2429 int mds = session->s_mds;
2430 int err = -ENOMEM;
2431 struct ceph_pagelist *pagelist;
2432 struct ceph_reconnect_state recon_state;
2433
2434 pr_info("mds%d reconnect start\n", mds);
2435
2436 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2437 if (!pagelist)
2438 goto fail_nopagelist;
2439 ceph_pagelist_init(pagelist);
2440
2441 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS);
2442 if (!reply)
2443 goto fail_nomsg;
2444
2445 mutex_lock(&session->s_mutex);
2446 session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2447 session->s_seq = 0;
2448
2449 ceph_con_open(&session->s_con,
2450 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2451
2452 /* replay unsafe requests */
2453 replay_unsafe_requests(mdsc, session);
2454
2455 down_read(&mdsc->snap_rwsem);
2456
2457 dout("session %p state %s\n", session,
2458 session_state_name(session->s_state));
2459
2460 /* drop old cap expires; we're about to reestablish that state */
2461 discard_cap_releases(mdsc, session);
2462
2463 /* traverse this session's caps */
2464 err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
2465 if (err)
2466 goto fail;
2467
2468 recon_state.pagelist = pagelist;
2469 recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
2470 err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2471 if (err < 0)
2472 goto fail;
2473
2474 /*
2475 * snaprealms. we provide mds with the ino, seq (version), and
2476 * parent for all of our realms. If the mds has any newer info,
2477 * it will tell us.
2478 */
2479 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
2480 struct ceph_snap_realm *realm =
2481 rb_entry(p, struct ceph_snap_realm, node);
2482 struct ceph_mds_snaprealm_reconnect sr_rec;
2483
2484 dout(" adding snap realm %llx seq %lld parent %llx\n",
2485 realm->ino, realm->seq, realm->parent_ino);
2486 sr_rec.ino = cpu_to_le64(realm->ino);
2487 sr_rec.seq = cpu_to_le64(realm->seq);
2488 sr_rec.parent = cpu_to_le64(realm->parent_ino);
2489 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
2490 if (err)
2491 goto fail;
2492 }
2493
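/*
 * Attach the pagelist (cap count, per-cap records, then snaprealm
 * records) as the message payload.  Header version 2 presumably tells
 * the MDS to expect the newer cap reconnect format that carries flock
 * data.
 */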
2494 reply->pagelist = pagelist;
2495 if (recon_state.flock)
2496 reply->hdr.version = cpu_to_le16(2);
2497 reply->hdr.data_len = cpu_to_le32(pagelist->length);
2498 reply->nr_pages = calc_pages_for(0, pagelist->length);
2499 ceph_con_send(&session->s_con, reply);
2500
2501 mutex_unlock(&session->s_mutex);
2502
2503 mutex_lock(&mdsc->mutex);
2504 __wake_requests(mdsc, &session->s_waiting);
2505 mutex_unlock(&mdsc->mutex);
2506
2507 up_read(&mdsc->snap_rwsem);
2508 return;
2509
2510 fail:
2511 ceph_msg_put(reply);
2512 up_read(&mdsc->snap_rwsem);
2513 mutex_unlock(&session->s_mutex);
2514 fail_nomsg:
2515 ceph_pagelist_release(pagelist);
2516 kfree(pagelist);
2517 fail_nopagelist:
2518 pr_err("error %d preparing reconnect for mds%d\n", err, mds);
2519 return;
2520 }
2521
2522
2523 /*
2524 * compare old and new mdsmaps, kicking requests
2525 * and closing out old connections as necessary
2526 *
2527 * called under mdsc->mutex.
2528 */
2529 static void check_new_map(struct ceph_mds_client *mdsc,
2530 struct ceph_mdsmap *newmap,
2531 struct ceph_mdsmap *oldmap)
2532 {
2533 int i;
2534 int oldstate, newstate;
2535 struct ceph_mds_session *s;
2536
2537 dout("check_new_map new %u old %u\n",
2538 newmap->m_epoch, oldmap->m_epoch);
2539
2540 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
2541 if (mdsc->sessions[i] == NULL)
2542 continue;
2543 s = mdsc->sessions[i];
2544 oldstate = ceph_mdsmap_get_state(oldmap, i);
2545 newstate = ceph_mdsmap_get_state(newmap, i);
2546
2547 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
2548 i, ceph_mds_state_name(oldstate),
2549 ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
2550 ceph_mds_state_name(newstate),
2551 ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
2552 session_state_name(s->s_state));
2553
2554 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
2555 ceph_mdsmap_get_addr(newmap, i),
2556 sizeof(struct ceph_entity_addr))) {
2557 if (s->s_state == CEPH_MDS_SESSION_OPENING) {
2558 /* the session never opened, just close it
2559 * out now */
2560 __wake_requests(mdsc, &s->s_waiting);
2561 __unregister_session(mdsc, s);
2562 } else {
2563 /* just close it */
2564 mutex_unlock(&mdsc->mutex);
2565 mutex_lock(&s->s_mutex);
2566 mutex_lock(&mdsc->mutex);
2567 ceph_con_close(&s->s_con);
2568 mutex_unlock(&s->s_mutex);
2569 s->s_state = CEPH_MDS_SESSION_RESTARTING;
2570 }
2571
2572 /* kick any requests waiting on the recovering mds */
2573 kick_requests(mdsc, i);
2574 } else if (oldstate == newstate) {
2575 continue; /* nothing new with this mds */
2576 }
2577
2578 /*
2579 * send reconnect?
2580 */
2581 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
2582 newstate >= CEPH_MDS_STATE_RECONNECT) {
2583 mutex_unlock(&mdsc->mutex);
2584 send_mds_reconnect(mdsc, s);
2585 mutex_lock(&mdsc->mutex);
2586 }
2587
2588 /*
2589 * kick requests on any mds that has gone active.
2590 */
2591 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
2592 newstate >= CEPH_MDS_STATE_ACTIVE) {
2593 if (oldstate != CEPH_MDS_STATE_CREATING &&
2594 oldstate != CEPH_MDS_STATE_STARTING)
2595 pr_info("mds%d recovery completed\n", s->s_mds);
2596 kick_requests(mdsc, i);
2597 ceph_kick_flushing_caps(mdsc, s);
2598 wake_up_session_caps(s, 1);
2599 }
2600 }
2601
2602 for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
2603 s = mdsc->sessions[i];
2604 if (!s)
2605 continue;
2606 if (!ceph_mdsmap_is_laggy(newmap, i))
2607 continue;
2608 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
2609 s->s_state == CEPH_MDS_SESSION_HUNG ||
2610 s->s_state == CEPH_MDS_SESSION_CLOSING) {
2611 dout(" connecting to export targets of laggy mds%d\n",
2612 i);
2613 __open_export_target_sessions(mdsc, s);
2614 }
2615 }
2616 }
2617
2618
2619
2620 /*
2621 * leases
2622 */
2623
2624 /*
2625 * caller must hold session s_mutex, dentry->d_lock
2626 */
2627 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
2628 {
2629 struct ceph_dentry_info *di = ceph_dentry(dentry);
2630
2631 ceph_put_mds_session(di->lease_session);
2632 di->lease_session = NULL;
2633 }
2634
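/*
 * Handle a dentry lease message from the MDS: look up the inode and the
 * named dentry, then apply the REVOKE or RENEW action.  Revokes (and any
 * message we can't match to a dentry) are acked back to the MDS by
 * reusing the incoming message with the action rewritten to REVOKE_ACK.
 */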
2635 static void handle_lease(struct ceph_mds_client *mdsc,
2636 struct ceph_mds_session *session,
2637 struct ceph_msg *msg)
2638 {
2639 struct super_block *sb = mdsc->fsc->sb;
2640 struct inode *inode;
2641 struct ceph_inode_info *ci;
2642 struct dentry *parent, *dentry;
2643 struct ceph_dentry_info *di;
2644 int mds = session->s_mds;
2645 struct ceph_mds_lease *h = msg->front.iov_base;
2646 u32 seq;
2647 struct ceph_vino vino;
2648 int mask;
2649 struct qstr dname;
2650 int release = 0;
2651
2652 dout("handle_lease from mds%d\n", mds);
2653
2654 /* decode */
2655 if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
2656 goto bad;
2657 vino.ino = le64_to_cpu(h->ino);
2658 vino.snap = CEPH_NOSNAP;
2659 mask = le16_to_cpu(h->mask);
2660 seq = le32_to_cpu(h->seq);
2661 dname.name = (void *)h + sizeof(*h) + sizeof(u32);
2662 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
2663 if (dname.len != get_unaligned_le32(h+1))
2664 goto bad;
2665
2666 mutex_lock(&session->s_mutex);
2667 session->s_seq++;
2668
2669 /* lookup inode */
2670 inode = ceph_find_inode(sb, vino);
2671 dout("handle_lease %s, mask %d, ino %llx %p %.*s\n",
2672 ceph_lease_op_name(h->action), mask, vino.ino, inode,
2673 dname.len, dname.name);
2674 if (inode == NULL) {
2675 dout("handle_lease no inode %llx\n", vino.ino);
2676 goto release;
2677 }
2678 ci = ceph_inode(inode);
2679
2680 /* dentry */
2681 parent = d_find_alias(inode);
2682 if (!parent) {
2683 dout("no parent dentry on inode %p\n", inode);
2684 WARN_ON(1);
2685 goto release; /* hrm... */
2686 }
2687 dname.hash = full_name_hash(dname.name, dname.len);
2688 dentry = d_lookup(parent, &dname);
2689 dput(parent);
2690 if (!dentry)
2691 goto release;
2692
2693 spin_lock(&dentry->d_lock);
2694 di = ceph_dentry(dentry);
2695 switch (h->action) {
2696 case CEPH_MDS_LEASE_REVOKE:
2697 if (di && di->lease_session == session) {
2698 if (ceph_seq_cmp(di->lease_seq, seq) > 0)
2699 h->seq = cpu_to_le32(di->lease_seq);
2700 __ceph_mdsc_drop_dentry_lease(dentry);
2701 }
2702 release = 1;
2703 break;
2704
2705 case CEPH_MDS_LEASE_RENEW:
2706 if (di && di->lease_session == session &&
2707 di->lease_gen == session->s_cap_gen &&
2708 di->lease_renew_from &&
2709 di->lease_renew_after == 0) {
2710 unsigned long duration =
2711 le32_to_cpu(h->duration_ms) * HZ / 1000;
2712
2713 di->lease_seq = seq;
2714 dentry->d_time = di->lease_renew_from + duration;
2715 di->lease_renew_after = di->lease_renew_from +
2716 (duration >> 1);
2717 di->lease_renew_from = 0;
2718 }
2719 break;
2720 }
2721 spin_unlock(&dentry->d_lock);
2722 dput(dentry);
2723
2724 if (!release)
2725 goto out;
2726
2727 release:
2728 /* let's just reuse the same message */
2729 h->action = CEPH_MDS_LEASE_REVOKE_ACK;
2730 ceph_msg_get(msg);
2731 ceph_con_send(&session->s_con, msg);
2732
2733 out:
2734 iput(inode);
2735 mutex_unlock(&session->s_mutex);
2736 return;
2737
2738 bad:
2739 pr_err("corrupt lease message\n");
2740 ceph_msg_dump(msg);
2741 }
2742
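/*
 * Build and send a CEPH_MSG_CLIENT_LEASE message for @dentry to the
 * given session: the fixed lease header is followed by the dentry name
 * (length-prefixed with an unaligned le32).
 */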
2743 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
2744 struct inode *inode,
2745 struct dentry *dentry, char action,
2746 u32 seq)
2747 {
2748 struct ceph_msg *msg;
2749 struct ceph_mds_lease *lease;
2750 int len = sizeof(*lease) + sizeof(u32);
2751 int dnamelen = 0;
2752
2753 dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
2754 inode, dentry, ceph_lease_op_name(action), session->s_mds);
2755 dnamelen = dentry->d_name.len;
2756 len += dnamelen;
2757
2758 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS);
2759 if (!msg)
2760 return;
2761 lease = msg->front.iov_base;
2762 lease->action = action;
2763 lease->mask = cpu_to_le16(1);
2764 lease->ino = cpu_to_le64(ceph_vino(inode).ino);
2765 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
2766 lease->seq = cpu_to_le32(seq);
2767 put_unaligned_le32(dnamelen, lease + 1);
2768 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
2769
2770 /*
2771 * if this is a preemptive lease RELEASE, no need to
2772 * flush request stream, since the actual request will
2773 * soon follow.
2774 */
2775 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
2776
2777 ceph_con_send(&session->s_con, msg);
2778 }
2779
2780 /*
2781 * Preemptively release a lease we expect to invalidate anyway.
2782 * Both @inode and @dentry are required (note the BUG_ONs below).
2783 */
2784 void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
2785 struct dentry *dentry, int mask)
2786 {
2787 struct ceph_dentry_info *di;
2788 struct ceph_mds_session *session;
2789 u32 seq;
2790
2791 BUG_ON(inode == NULL);
2792 BUG_ON(dentry == NULL);
2793 BUG_ON(mask == 0);
2794
2795 /* is dentry lease valid? */
2796 spin_lock(&dentry->d_lock);
2797 di = ceph_dentry(dentry);
2798 if (!di || !di->lease_session ||
2799 di->lease_session->s_mds < 0 ||
2800 di->lease_gen != di->lease_session->s_cap_gen ||
2801 !time_before(jiffies, dentry->d_time)) {
2802 dout("lease_release inode %p dentry %p -- "
2803 "no lease on %d\n",
2804 inode, dentry, mask);
2805 spin_unlock(&dentry->d_lock);
2806 return;
2807 }
2808
2809 /* we do have a lease on this dentry; note mds and seq */
2810 session = ceph_get_mds_session(di->lease_session);
2811 seq = di->lease_seq;
2812 __ceph_mdsc_drop_dentry_lease(dentry);
2813 spin_unlock(&dentry->d_lock);
2814
2815 dout("lease_release inode %p dentry %p mask %d to mds%d\n",
2816 inode, dentry, mask, session->s_mds);
2817 ceph_mdsc_lease_send_msg(session, inode, dentry,
2818 CEPH_MDS_LEASE_RELEASE, seq);
2819 ceph_put_mds_session(session);
2820 }
2821
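/*
 * A minimal usage sketch (not taken from this file): a namespace
 * operation that is about to invalidate a dentry could preemptively
 * drop its lease first, e.g.
 *
 *	ceph_mdsc_lease_release(mdsc, dir, dentry, CEPH_LOCK_DN);
 *
 * The CEPH_LOCK_DN mask and the "dir" inode here are assumptions for
 * illustration; callers pass whatever lease mask they actually hold.
 */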
2822 /*
2823 * drop all leases (and dentry refs) in preparation for umount
2824 */
2825 static void drop_leases(struct ceph_mds_client *mdsc)
2826 {
2827 int i;
2828
2829 dout("drop_leases\n");
2830 mutex_lock(&mdsc->mutex);
2831 for (i = 0; i < mdsc->max_sessions; i++) {
2832 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2833 if (!s)
2834 continue;
2835 mutex_unlock(&mdsc->mutex);
2836 mutex_lock(&s->s_mutex);
2837 mutex_unlock(&s->s_mutex);
2838 ceph_put_mds_session(s);
2839 mutex_lock(&mdsc->mutex);
2840 }
2841 mutex_unlock(&mdsc->mutex);
2842 }
2843
2844
2845
2846 /*
2847 * delayed work -- periodically trim expired leases, renew caps with mds
2848 */
2849 static void schedule_delayed(struct ceph_mds_client *mdsc)
2850 {
2851 int delay = 5; /* seconds */
2852 unsigned hz = round_jiffies_relative(HZ * delay); /* jiffies */
2853 schedule_delayed_work(&mdsc->delayed_work, hz);
2854 }
2855
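/*
 * Periodic housekeeping: flush delayed cap work, renew caps with each
 * MDS roughly every session_timeout/4, mark unresponsive sessions HUNG,
 * resend pending session close requests, and push out queued cap
 * releases.  Reschedules itself via schedule_delayed().
 */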
2856 static void delayed_work(struct work_struct *work)
2857 {
2858 int i;
2859 struct ceph_mds_client *mdsc =
2860 container_of(work, struct ceph_mds_client, delayed_work.work);
2861 int renew_interval;
2862 int renew_caps;
2863
2864 dout("mdsc delayed_work\n");
2865 ceph_check_delayed_caps(mdsc);
2866
2867 mutex_lock(&mdsc->mutex);
2868 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
2869 renew_caps = time_after_eq(jiffies, HZ*renew_interval +
2870 mdsc->last_renew_caps);
2871 if (renew_caps)
2872 mdsc->last_renew_caps = jiffies;
2873
2874 for (i = 0; i < mdsc->max_sessions; i++) {
2875 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
2876 if (s == NULL)
2877 continue;
2878 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
2879 dout("resending session close request for mds%d\n",
2880 s->s_mds);
2881 request_close_session(mdsc, s);
2882 ceph_put_mds_session(s);
2883 continue;
2884 }
2885 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
2886 if (s->s_state == CEPH_MDS_SESSION_OPEN) {
2887 s->s_state = CEPH_MDS_SESSION_HUNG;
2888 pr_info("mds%d hung\n", s->s_mds);
2889 }
2890 }
2891 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
2892 /* this mds is failed or recovering, just wait */
2893 ceph_put_mds_session(s);
2894 continue;
2895 }
2896 mutex_unlock(&mdsc->mutex);
2897
2898 mutex_lock(&s->s_mutex);
2899 if (renew_caps)
2900 send_renew_caps(mdsc, s);
2901 else
2902 ceph_con_keepalive(&s->s_con);
2903 ceph_add_cap_releases(mdsc, s);
2904 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
2905 s->s_state == CEPH_MDS_SESSION_HUNG)
2906 ceph_send_cap_releases(mdsc, s);
2907 mutex_unlock(&s->s_mutex);
2908 ceph_put_mds_session(s);
2909
2910 mutex_lock(&mdsc->mutex);
2911 }
2912 mutex_unlock(&mdsc->mutex);
2913
2914 schedule_delayed(mdsc);
2915 }
2916
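/*
 * Allocate and initialize the mds client state for this fs client:
 * session table, request tree, snap realm tracking, cap bookkeeping,
 * and the delayed work timer.
 */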
2917 int ceph_mdsc_init(struct ceph_fs_client *fsc)
2918 {
2920 struct ceph_mds_client *mdsc;
2921
2922 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
2923 if (!mdsc)
2924 return -ENOMEM;
2925 mdsc->fsc = fsc;
2926 fsc->mdsc = mdsc;
2927 mutex_init(&mdsc->mutex);
2928 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2929 if (mdsc->mdsmap == NULL) {
2930 /* don't leak mdsc; fsc->mdsc already points at it */
fsc->mdsc = NULL;
kfree(mdsc);
return -ENOMEM;
}
2931
2932 init_completion(&mdsc->safe_umount_waiters);
2933 init_waitqueue_head(&mdsc->session_close_wq);
2934 INIT_LIST_HEAD(&mdsc->waiting_for_map);
2935 mdsc->sessions = NULL;
2936 mdsc->max_sessions = 0;
2937 mdsc->stopping = 0;
2938 init_rwsem(&mdsc->snap_rwsem);
2939 mdsc->snap_realms = RB_ROOT;
2940 INIT_LIST_HEAD(&mdsc->snap_empty);
2941 spin_lock_init(&mdsc->snap_empty_lock);
2942 mdsc->last_tid = 0;
2943 mdsc->request_tree = RB_ROOT;
2944 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
2945 mdsc->last_renew_caps = jiffies;
2946 INIT_LIST_HEAD(&mdsc->cap_delay_list);
2947 spin_lock_init(&mdsc->cap_delay_lock);
2948 INIT_LIST_HEAD(&mdsc->snap_flush_list);
2949 spin_lock_init(&mdsc->snap_flush_lock);
2950 mdsc->cap_flush_seq = 0;
2951 INIT_LIST_HEAD(&mdsc->cap_dirty);
2952 mdsc->num_cap_flushing = 0;
2953 spin_lock_init(&mdsc->cap_dirty_lock);
2954 init_waitqueue_head(&mdsc->cap_flushing_wq);
2955 spin_lock_init(&mdsc->dentry_lru_lock);
2956 INIT_LIST_HEAD(&mdsc->dentry_lru);
2957
2958 ceph_caps_init(mdsc);
2959 ceph_adjust_min_caps(mdsc, fsc->min_caps);
2960
2961 return 0;
2962 }
2963
2964 /*
2965 * Wait for safe replies on open mds requests. If we time out, drop
2966 * all requests from the tree to avoid dangling dentry refs.
2967 */
2968 static void wait_requests(struct ceph_mds_client *mdsc)
2969 {
2970 struct ceph_mds_request *req;
2971 struct ceph_fs_client *fsc = mdsc->fsc;
2972
2973 mutex_lock(&mdsc->mutex);
2974 if (__get_oldest_req(mdsc)) {
2975 mutex_unlock(&mdsc->mutex);
2976
2977 dout("wait_requests waiting for requests\n");
2978 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
2979 fsc->client->options->mount_timeout * HZ);
2980
2981 /* tear down remaining requests */
2982 mutex_lock(&mdsc->mutex);
2983 while ((req = __get_oldest_req(mdsc))) {
2984 dout("wait_requests timed out on tid %llu\n",
2985 req->r_tid);
2986 __unregister_request(mdsc, req);
2987 }
2988 }
2989 mutex_unlock(&mdsc->mutex);
2990 dout("wait_requests done\n");
2991 }
2992
2993 /*
2994 * called before mount is ro, and before dentries are torn down.
2995 * (hmm, does this still race with new lookups?)
2996 */
2997 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
2998 {
2999 dout("pre_umount\n");
3000 mdsc->stopping = 1;
3001
3002 drop_leases(mdsc);
3003 ceph_flush_dirty_caps(mdsc);
3004 wait_requests(mdsc);
3005
3006 /*
3007 * wait for reply handlers to drop their request refs and
3008 * their inode/dcache refs
3009 */
3010 ceph_msgr_flush();
3011 }
3012
3013 /*
3014 * wait for all write mds requests to flush.
3015 */
3016 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3017 {
3018 struct ceph_mds_request *req = NULL, *nextreq;
3019 struct rb_node *n;
3020
3021 mutex_lock(&mdsc->mutex);
3022 dout("wait_unsafe_requests want %lld\n", want_tid);
3023 restart:
3024 req = __get_oldest_req(mdsc);
3025 while (req && req->r_tid <= want_tid) {
3026 /* find next request */
3027 n = rb_next(&req->r_node);
3028 if (n)
3029 nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3030 else
3031 nextreq = NULL;
3032 if ((req->r_op & CEPH_MDS_OP_WRITE)) {
3033 /* write op */
3034 ceph_mdsc_get_request(req);
3035 if (nextreq)
3036 ceph_mdsc_get_request(nextreq);
3037 mutex_unlock(&mdsc->mutex);
3038 dout("wait_unsafe_requests wait on %llu (want %llu)\n",
3039 req->r_tid, want_tid);
3040 wait_for_completion(&req->r_safe_completion);
3041 mutex_lock(&mdsc->mutex);
3042 ceph_mdsc_put_request(req);
3043 if (!nextreq)
3044 break; /* 'next' did not exist before we waited, so we're done */
3045 if (RB_EMPTY_NODE(&nextreq->r_node)) {
3046 /* next request was removed from tree */
3047 ceph_mdsc_put_request(nextreq);
3048 goto restart;
3049 }
3050 ceph_mdsc_put_request(nextreq); /* won't go away */
3051 }
3052 req = nextreq;
3053 }
3054 mutex_unlock(&mdsc->mutex);
3055 dout("wait_unsafe_requests done\n");
3056 }
3057
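/*
 * Flush dirty caps and wait until all metadata writes issued so far
 * (and their cap flushes) have been acknowledged.  Typically invoked
 * from the sync path; a no-op once the mount is shutting down.
 */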
3058 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3059 {
3060 u64 want_tid, want_flush;
3061
3062 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3063 return;
3064
3065 dout("sync\n");
3066 mutex_lock(&mdsc->mutex);
3067 want_tid = mdsc->last_tid;
3068 want_flush = mdsc->cap_flush_seq;
3069 mutex_unlock(&mdsc->mutex);
3070 dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
3071
3072 ceph_flush_dirty_caps(mdsc);
3073
3074 wait_unsafe_requests(mdsc, want_tid);
3075 wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
3076 }
3077
3078 /*
3079 * true if all sessions are closed, or we force unmount
3080 */
3081 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
3082 {
3083 int i, n = 0;
3084
3085 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3086 return true;
3087
3088 mutex_lock(&mdsc->mutex);
3089 for (i = 0; i < mdsc->max_sessions; i++)
3090 if (mdsc->sessions[i])
3091 n++;
3092 mutex_unlock(&mdsc->mutex);
3093 return n == 0;
3094 }
3095
3096 /*
3097 * called after sb is ro.
3098 */
3099 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3100 {
3101 struct ceph_mds_session *session;
3102 int i;
3103 struct ceph_fs_client *fsc = mdsc->fsc;
3104 unsigned long timeout = fsc->client->options->mount_timeout * HZ;
3105
3106 dout("close_sessions\n");
3107
3108 /* close sessions */
3109 mutex_lock(&mdsc->mutex);
3110 for (i = 0; i < mdsc->max_sessions; i++) {
3111 session = __ceph_lookup_mds_session(mdsc, i);
3112 if (!session)
3113 continue;
3114 mutex_unlock(&mdsc->mutex);
3115 mutex_lock(&session->s_mutex);
3116 __close_session(mdsc, session);
3117 mutex_unlock(&session->s_mutex);
3118 ceph_put_mds_session(session);
3119 mutex_lock(&mdsc->mutex);
3120 }
3121 mutex_unlock(&mdsc->mutex);
3122
3123 dout("waiting for sessions to close\n");
3124 wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
3125 timeout);
3126
3127 /* tear down remaining sessions */
3128 mutex_lock(&mdsc->mutex);
3129 for (i = 0; i < mdsc->max_sessions; i++) {
3130 if (mdsc->sessions[i]) {
3131 session = get_session(mdsc->sessions[i]);
3132 __unregister_session(mdsc, session);
3133 mutex_unlock(&mdsc->mutex);
3134 mutex_lock(&session->s_mutex);
3135 remove_session_caps(session);
3136 mutex_unlock(&session->s_mutex);
3137 ceph_put_mds_session(session);
3138 mutex_lock(&mdsc->mutex);
3139 }
3140 }
3141 WARN_ON(!list_empty(&mdsc->cap_delay_list));
3142 mutex_unlock(&mdsc->mutex);
3143
3144 ceph_cleanup_empty_realms(mdsc);
3145
3146 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3147
3148 dout("stopped\n");
3149 }
3150
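/*
 * Final teardown of mds client state: stop the delayed work timer,
 * free the mdsmap and session table, and finalize cap accounting.
 */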
3151 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3152 {
3153 dout("stop\n");
3154 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3155 if (mdsc->mdsmap)
3156 ceph_mdsmap_destroy(mdsc->mdsmap);
3157 kfree(mdsc->sessions);
3158 ceph_caps_finalize(mdsc);
3159 }
3160
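/*
 * Tear down and free the mds client attached to @fsc.
 */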
3161 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3162 {
3163 struct ceph_mds_client *mdsc = fsc->mdsc;
3164
3165 ceph_mdsc_stop(mdsc);
3166 fsc->mdsc = NULL;
3167 kfree(mdsc);
3168 }
3169
3170
3171 /*
3172 * handle mds map update.
3173 */
3174 void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3175 {
3176 u32 epoch;
3177 u32 maplen;
3178 void *p = msg->front.iov_base;
3179 void *end = p + msg->front.iov_len;
3180 struct ceph_mdsmap *newmap, *oldmap;
3181 struct ceph_fsid fsid;
3182 int err = -EINVAL;
3183
3184 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3185 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3186 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3187 return;
3188 epoch = ceph_decode_32(&p);
3189 maplen = ceph_decode_32(&p);
3190 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3191
3192 /* do we need it? */
3193 ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
3194 mutex_lock(&mdsc->mutex);
3195 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3196 dout("handle_map epoch %u <= our %u\n",
3197 epoch, mdsc->mdsmap->m_epoch);
3198 mutex_unlock(&mdsc->mutex);
3199 return;
3200 }
3201
3202 newmap = ceph_mdsmap_decode(&p, end);
3203 if (IS_ERR(newmap)) {
3204 err = PTR_ERR(newmap);
3205 goto bad_unlock;
3206 }
3207
3208 /* swap into place */
3209 if (mdsc->mdsmap) {
3210 oldmap = mdsc->mdsmap;
3211 mdsc->mdsmap = newmap;
3212 check_new_map(mdsc, newmap, oldmap);
3213 ceph_mdsmap_destroy(oldmap);
3214 } else {
3215 mdsc->mdsmap = newmap; /* first mds map */
3216 }
3217 mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3218
3219 __wake_requests(mdsc, &mdsc->waiting_for_map);
3220
3221 mutex_unlock(&mdsc->mutex);
3222 schedule_delayed(mdsc);
3223 return;
3224
3225 bad_unlock:
3226 mutex_unlock(&mdsc->mutex);
3227 bad:
3228 pr_err("error decoding mdsmap %d\n", err);
3229 return;
3230 }
3231
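/*
 * Messenger callbacks: connection references are backed by the session
 * refcount (s_ref), so con_get/con_put simply take and drop a session
 * reference.
 */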
3232 static struct ceph_connection *con_get(struct ceph_connection *con)
3233 {
3234 struct ceph_mds_session *s = con->private;
3235
3236 if (get_session(s)) {
3237 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3238 return con;
3239 }
3240 dout("mdsc con_get %p FAIL\n", s);
3241 return NULL;
3242 }
3243
3244 static void con_put(struct ceph_connection *con)
3245 {
3246 struct ceph_mds_session *s = con->private;
3247
3248 ceph_put_mds_session(s);
3249 dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
3250 }
3251
3252 /*
3253 * if the client is unresponsive for long enough, the mds will kill
3254 * the session entirely.
3255 */
3256 static void peer_reset(struct ceph_connection *con)
3257 {
3258 struct ceph_mds_session *s = con->private;
3259 struct ceph_mds_client *mdsc = s->s_mdsc;
3260
3261 pr_warning("mds%d closed our session\n", s->s_mds);
3262 send_mds_reconnect(mdsc, s);
3263 }
3264
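/*
 * Dispatch an incoming message to the appropriate handler, after
 * verifying the session is still registered with the mdsc.
 */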
3265 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3266 {
3267 struct ceph_mds_session *s = con->private;
3268 struct ceph_mds_client *mdsc = s->s_mdsc;
3269 int type = le16_to_cpu(msg->hdr.type);
3270
3271 mutex_lock(&mdsc->mutex);
3272 if (__verify_registered_session(mdsc, s) < 0) {
3273 mutex_unlock(&mdsc->mutex);
3274 goto out;
3275 }
3276 mutex_unlock(&mdsc->mutex);
3277
3278 switch (type) {
3279 case CEPH_MSG_MDS_MAP:
3280 ceph_mdsc_handle_map(mdsc, msg);
3281 break;
3282 case CEPH_MSG_CLIENT_SESSION:
3283 handle_session(s, msg);
3284 break;
3285 case CEPH_MSG_CLIENT_REPLY:
3286 handle_reply(s, msg);
3287 break;
3288 case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3289 handle_forward(mdsc, s, msg);
3290 break;
3291 case CEPH_MSG_CLIENT_CAPS:
3292 ceph_handle_caps(s, msg);
3293 break;
3294 case CEPH_MSG_CLIENT_SNAP:
3295 ceph_handle_snap(mdsc, s, msg);
3296 break;
3297 case CEPH_MSG_CLIENT_LEASE:
3298 handle_lease(mdsc, s, msg);
3299 break;
3300
3301 default:
3302 pr_err("received unknown message type %d %s\n", type,
3303 ceph_msg_type_name(type));
3304 }
3305 out:
3306 ceph_msg_put(msg);
3307 }
3308
3309 /*
3310 * authentication
3311 */
3312 static int get_authorizer(struct ceph_connection *con,
3313 void **buf, int *len, int *proto,
3314 void **reply_buf, int *reply_len, int force_new)
3315 {
3316 struct ceph_mds_session *s = con->private;
3317 struct ceph_mds_client *mdsc = s->s_mdsc;
3318 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3319 int ret = 0;
3320
3321 if (force_new && s->s_authorizer) {
3322 ac->ops->destroy_authorizer(ac, s->s_authorizer);
3323 s->s_authorizer = NULL;
3324 }
3325 if (s->s_authorizer == NULL) {
3326 if (ac->ops->create_authorizer) {
3327 ret = ac->ops->create_authorizer(
3328 ac, CEPH_ENTITY_TYPE_MDS,
3329 &s->s_authorizer,
3330 &s->s_authorizer_buf,
3331 &s->s_authorizer_buf_len,
3332 &s->s_authorizer_reply_buf,
3333 &s->s_authorizer_reply_buf_len);
3334 if (ret)
3335 return ret;
3336 }
3337 }
3338
3339 *proto = ac->protocol;
3340 *buf = s->s_authorizer_buf;
3341 *len = s->s_authorizer_buf_len;
3342 *reply_buf = s->s_authorizer_reply_buf;
3343 *reply_len = s->s_authorizer_reply_buf_len;
3344 return 0;
3345 }
3346
3347
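/*
 * Hand the MDS's authorizer reply back to the auth client for
 * verification.
 */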
3348 static int verify_authorizer_reply(struct ceph_connection *con, int len)
3349 {
3350 struct ceph_mds_session *s = con->private;
3351 struct ceph_mds_client *mdsc = s->s_mdsc;
3352 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3353
3354 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
3355 }
3356
3357 static int invalidate_authorizer(struct ceph_connection *con)
3358 {
3359 struct ceph_mds_session *s = con->private;
3360 struct ceph_mds_client *mdsc = s->s_mdsc;
3361 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3362
3363 if (ac->ops->invalidate_authorizer)
3364 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3365
3366 return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3367 }
3368
3369 static const struct ceph_connection_operations mds_con_ops = {
3370 .get = con_get,
3371 .put = con_put,
3372 .dispatch = dispatch,
3373 .get_authorizer = get_authorizer,
3374 .verify_authorizer_reply = verify_authorizer_reply,
3375 .invalidate_authorizer = invalidate_authorizer,
3376 .peer_reset = peer_reset,
3377 };
3378
3379 /* eof */