Commit | Line | Data |
---|---|---|
3d14c5d2 | 1 | #include <linux/ceph/ceph_debug.h> |
2f2dc053 | 2 | |
496e5955 | 3 | #include <linux/fs.h> |
2f2dc053 | 4 | #include <linux/wait.h> |
5a0e3ad6 | 5 | #include <linux/slab.h> |
2f2dc053 | 6 | #include <linux/sched.h> |
3d14c5d2 YS |
7 | #include <linux/debugfs.h> |
8 | #include <linux/seq_file.h> | |
2f2dc053 | 9 | |
2f2dc053 | 10 | #include "super.h" |
3d14c5d2 YS |
11 | #include "mds_client.h" |
12 | ||
1fe60e51 | 13 | #include <linux/ceph/ceph_features.h> |
3d14c5d2 YS |
14 | #include <linux/ceph/messenger.h> |
15 | #include <linux/ceph/decode.h> | |
16 | #include <linux/ceph/pagelist.h> | |
17 | #include <linux/ceph/auth.h> | |
18 | #include <linux/ceph/debugfs.h> | |
2f2dc053 SW |
19 | |
20 | /* | |
21 | * A cluster of MDS (metadata server) daemons is responsible for | |
22 | * managing the file system namespace (the directory hierarchy and | |
23 | * inodes) and for coordinating shared access to storage. Metadata is | |
24 | * partitioned hierarchically across a number of servers, and that |
25 | * partition varies over time as the cluster adjusts the distribution | |
26 | * in order to balance load. | |
27 | * | |
28 | * The MDS client is primarily responsible for managing synchronous |
29 | * metadata requests for operations like open, unlink, and so forth. | |
30 | * If there is an MDS failure, we find out about it when we (possibly |
31 | * request and) receive a new MDS map, and can resubmit affected | |
32 | * requests. | |
33 | * | |
34 | * For the most part, though, we take advantage of a lossless | |
35 | * communications channel to the MDS, and do not need to worry about | |
36 | * timing out or resubmitting requests. | |
37 | * | |
38 | * We maintain a stateful "session" with each MDS we interact with. | |
39 | * Within each session, we send periodic heartbeat messages to ensure |
40 | * any capabilities or leases we have been issued remain valid. If |
41 | * the session times out and goes stale, our leases and capabilities | |
42 | * are no longer valid. | |
43 | */ | |
44 | ||
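To make the synchronous request flow described above concrete, here is a minimal sketch of the typical caller pattern, modeled on callers elsewhere in fs/ceph (e.g. dir.c). It assumes the `ceph_mdsc_do_request()` entry point defined later in this file; the surrounding `mdsc` and `dentry` variables are illustrative.

```c
struct ceph_mds_request *req;
int err;

req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
if (IS_ERR(req))
	return PTR_ERR(req);
req->r_dentry = dget(dentry);	/* pin the dentry for the request */
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);	/* blocks for the reply */
ceph_mdsc_put_request(req);	/* drop our ref; see ceph_mdsc_release_request() */
```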
20cb34ae SW |
45 | struct ceph_reconnect_state { |
46 | struct ceph_pagelist *pagelist; | |
47 | bool flock; | |
48 | }; | |
49 | ||
2f2dc053 SW |
50 | static void __wake_requests(struct ceph_mds_client *mdsc, |
51 | struct list_head *head); | |
52 | ||
9e32789f | 53 | static const struct ceph_connection_operations mds_con_ops; |
2f2dc053 SW |
54 | |
55 | ||
56 | /* | |
57 | * mds reply parsing | |
58 | */ | |
59 | ||
60 | /* | |
61 | * parse individual inode info | |
62 | */ | |
63 | static int parse_reply_info_in(void **p, void *end, | |
14303d20 SW |
64 | struct ceph_mds_reply_info_in *info, |
65 | int features) | |
2f2dc053 SW |
66 | { |
67 | int err = -EIO; | |
68 | ||
69 | info->in = *p; | |
70 | *p += sizeof(struct ceph_mds_reply_inode) + | |
71 | sizeof(*info->in->fragtree.splits) * | |
72 | le32_to_cpu(info->in->fragtree.nsplits); | |
73 | ||
74 | ceph_decode_32_safe(p, end, info->symlink_len, bad); | |
75 | ceph_decode_need(p, end, info->symlink_len, bad); | |
76 | info->symlink = *p; | |
77 | *p += info->symlink_len; | |
78 | ||
14303d20 SW |
79 | if (features & CEPH_FEATURE_DIRLAYOUTHASH) |
80 | ceph_decode_copy_safe(p, end, &info->dir_layout, | |
81 | sizeof(info->dir_layout), bad); | |
82 | else | |
83 | memset(&info->dir_layout, 0, sizeof(info->dir_layout)); | |
84 | ||
2f2dc053 SW |
85 | ceph_decode_32_safe(p, end, info->xattr_len, bad); |
86 | ceph_decode_need(p, end, info->xattr_len, bad); | |
87 | info->xattr_data = *p; | |
88 | *p += info->xattr_len; | |
89 | return 0; | |
90 | bad: | |
91 | return err; | |
92 | } | |
93 | ||
94 | /* | |
95 | * parse a normal reply, which may contain a (dir+)dentry and/or a | |
96 | * target inode. | |
97 | */ | |
98 | static int parse_reply_info_trace(void **p, void *end, | |
14303d20 SW |
99 | struct ceph_mds_reply_info_parsed *info, |
100 | int features) | |
2f2dc053 SW |
101 | { |
102 | int err; | |
103 | ||
104 | if (info->head->is_dentry) { | |
14303d20 | 105 | err = parse_reply_info_in(p, end, &info->diri, features); |
2f2dc053 SW |
106 | if (err < 0) |
107 | goto out_bad; | |
108 | ||
109 | if (unlikely(*p + sizeof(*info->dirfrag) > end)) | |
110 | goto bad; | |
111 | info->dirfrag = *p; | |
112 | *p += sizeof(*info->dirfrag) + | |
113 | sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); | |
114 | if (unlikely(*p > end)) | |
115 | goto bad; | |
116 | ||
117 | ceph_decode_32_safe(p, end, info->dname_len, bad); | |
118 | ceph_decode_need(p, end, info->dname_len, bad); | |
119 | info->dname = *p; | |
120 | *p += info->dname_len; | |
121 | info->dlease = *p; | |
122 | *p += sizeof(*info->dlease); | |
123 | } | |
124 | ||
125 | if (info->head->is_target) { | |
14303d20 | 126 | err = parse_reply_info_in(p, end, &info->targeti, features); |
2f2dc053 SW |
127 | if (err < 0) |
128 | goto out_bad; | |
129 | } | |
130 | ||
131 | if (unlikely(*p != end)) | |
132 | goto bad; | |
133 | return 0; | |
134 | ||
135 | bad: | |
136 | err = -EIO; | |
137 | out_bad: | |
138 | pr_err("problem parsing mds trace %d\n", err); | |
139 | return err; | |
140 | } | |
141 | ||
142 | /* | |
143 | * parse readdir results | |
144 | */ | |
145 | static int parse_reply_info_dir(void **p, void *end, | |
14303d20 SW |
146 | struct ceph_mds_reply_info_parsed *info, |
147 | int features) | |
2f2dc053 SW |
148 | { |
149 | u32 num, i = 0; | |
150 | int err; | |
151 | ||
152 | info->dir_dir = *p; | |
153 | if (*p + sizeof(*info->dir_dir) > end) | |
154 | goto bad; | |
155 | *p += sizeof(*info->dir_dir) + | |
156 | sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); | |
157 | if (*p > end) | |
158 | goto bad; | |
159 | ||
160 | ceph_decode_need(p, end, sizeof(num) + 2, bad); | |
c89136ea SW |
161 | num = ceph_decode_32(p); |
162 | info->dir_end = ceph_decode_8(p); | |
163 | info->dir_complete = ceph_decode_8(p); | |
2f2dc053 SW |
164 | if (num == 0) |
165 | goto done; | |
166 | ||
167 | /* alloc large array */ | |
168 | info->dir_nr = num; | |
169 | info->dir_in = kcalloc(num, sizeof(*info->dir_in) + | |
170 | sizeof(*info->dir_dname) + | |
171 | sizeof(*info->dir_dname_len) + | |
172 | sizeof(*info->dir_dlease), | |
173 | GFP_NOFS); | |
174 | if (info->dir_in == NULL) { | |
175 | err = -ENOMEM; | |
176 | goto out_bad; | |
177 | } | |
178 | info->dir_dname = (void *)(info->dir_in + num); | |
179 | info->dir_dname_len = (void *)(info->dir_dname + num); | |
180 | info->dir_dlease = (void *)(info->dir_dname_len + num); | |
181 | ||
182 | while (num) { | |
183 | /* dentry */ | |
184 | ceph_decode_need(p, end, sizeof(u32)*2, bad); | |
c89136ea | 185 | info->dir_dname_len[i] = ceph_decode_32(p); |
2f2dc053 SW |
186 | ceph_decode_need(p, end, info->dir_dname_len[i], bad); |
187 | info->dir_dname[i] = *p; | |
188 | *p += info->dir_dname_len[i]; | |
189 | dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i], | |
190 | info->dir_dname[i]); | |
191 | info->dir_dlease[i] = *p; | |
192 | *p += sizeof(struct ceph_mds_reply_lease); | |
193 | ||
194 | /* inode */ | |
14303d20 | 195 | err = parse_reply_info_in(p, end, &info->dir_in[i], features); |
2f2dc053 SW |
196 | if (err < 0) |
197 | goto out_bad; | |
198 | i++; | |
199 | num--; | |
200 | } | |
201 | ||
202 | done: | |
203 | if (*p != end) | |
204 | goto bad; | |
205 | return 0; | |
206 | ||
207 | bad: | |
208 | err = -EIO; | |
209 | out_bad: | |
210 | pr_err("problem parsing dir contents %d\n", err); | |
211 | return err; | |
212 | } | |
213 | ||
25933abd HS |
214 | /* |
215 | * parse fcntl F_GETLK results | |
216 | */ | |
217 | static int parse_reply_info_filelock(void **p, void *end, | |
14303d20 SW |
218 | struct ceph_mds_reply_info_parsed *info, |
219 | int features) | |
25933abd HS |
220 | { |
221 | if (*p + sizeof(*info->filelock_reply) > end) | |
222 | goto bad; | |
223 | ||
224 | info->filelock_reply = *p; | |
225 | *p += sizeof(*info->filelock_reply); | |
226 | ||
227 | if (unlikely(*p != end)) | |
228 | goto bad; | |
229 | return 0; | |
230 | ||
231 | bad: | |
232 | return -EIO; | |
233 | } | |
234 | ||
6e8575fa SL |
235 | /* |
236 | * parse create results | |
237 | */ | |
238 | static int parse_reply_info_create(void **p, void *end, | |
239 | struct ceph_mds_reply_info_parsed *info, | |
240 | int features) | |
241 | { | |
242 | if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { | |
243 | if (*p == end) { | |
244 | info->has_create_ino = false; | |
245 | } else { | |
246 | info->has_create_ino = true; | |
247 | info->ino = ceph_decode_64(p); | |
248 | } | |
249 | } | |
250 | ||
251 | if (unlikely(*p != end)) | |
252 | goto bad; | |
253 | return 0; | |
254 | ||
255 | bad: | |
256 | return -EIO; | |
257 | } | |
258 | ||
25933abd HS |
259 | /* |
260 | * parse extra results | |
261 | */ | |
262 | static int parse_reply_info_extra(void **p, void *end, | |
14303d20 SW |
263 | struct ceph_mds_reply_info_parsed *info, |
264 | int features) | |
25933abd HS |
265 | { |
266 | if (info->head->op == CEPH_MDS_OP_GETFILELOCK) | |
14303d20 | 267 | return parse_reply_info_filelock(p, end, info, features); |
8a034497 YZ |
268 | else if (info->head->op == CEPH_MDS_OP_READDIR || |
269 | info->head->op == CEPH_MDS_OP_LSSNAP) | |
14303d20 | 270 | return parse_reply_info_dir(p, end, info, features); |
6e8575fa SL |
271 | else if (info->head->op == CEPH_MDS_OP_CREATE) |
272 | return parse_reply_info_create(p, end, info, features); | |
273 | else | |
274 | return -EIO; | |
25933abd HS |
275 | } |
276 | ||
2f2dc053 SW |
277 | /* |
278 | * parse entire mds reply | |
279 | */ | |
280 | static int parse_reply_info(struct ceph_msg *msg, | |
14303d20 SW |
281 | struct ceph_mds_reply_info_parsed *info, |
282 | int features) | |
2f2dc053 SW |
283 | { |
284 | void *p, *end; | |
285 | u32 len; | |
286 | int err; | |
287 | ||
288 | info->head = msg->front.iov_base; | |
289 | p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); | |
290 | end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); | |
291 | ||
292 | /* trace */ | |
293 | ceph_decode_32_safe(&p, end, len, bad); | |
294 | if (len > 0) { | |
32852a81 | 295 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 296 | err = parse_reply_info_trace(&p, p+len, info, features); |
2f2dc053 SW |
297 | if (err < 0) |
298 | goto out_bad; | |
299 | } | |
300 | ||
25933abd | 301 | /* extra */ |
2f2dc053 SW |
302 | ceph_decode_32_safe(&p, end, len, bad); |
303 | if (len > 0) { | |
32852a81 | 304 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 305 | err = parse_reply_info_extra(&p, p+len, info, features); |
2f2dc053 SW |
306 | if (err < 0) |
307 | goto out_bad; | |
308 | } | |
309 | ||
310 | /* snap blob */ | |
311 | ceph_decode_32_safe(&p, end, len, bad); | |
312 | info->snapblob_len = len; | |
313 | info->snapblob = p; | |
314 | p += len; | |
315 | ||
316 | if (p != end) | |
317 | goto bad; | |
318 | return 0; | |
319 | ||
320 | bad: | |
321 | err = -EIO; | |
322 | out_bad: | |
323 | pr_err("mds parse_reply err %d\n", err); | |
324 | return err; | |
325 | } | |
326 | ||
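For reference, a sketch of the reply front that parse_reply_info() walks, reconstructed from the decode calls above (not an actual on-wire struct definition):

```c
/*
 * struct ceph_mds_reply_head head;
 * u32 trace_len;  u8 trace[trace_len];    -> parse_reply_info_trace()
 * u32 extra_len;  u8 extra[extra_len];    -> parse_reply_info_extra()
 * u32 snap_len;   u8 snapblob[snap_len];  -> stashed in info->snapblob
 *
 * Any bytes left over after the snap blob make the whole reply -EIO.
 */
```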
327 | static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) | |
328 | { | |
329 | kfree(info->dir_in); | |
330 | } | |
331 | ||
332 | ||
333 | /* | |
334 | * sessions | |
335 | */ | |
336 | static const char *session_state_name(int s) | |
337 | { | |
338 | switch (s) { | |
339 | case CEPH_MDS_SESSION_NEW: return "new"; | |
340 | case CEPH_MDS_SESSION_OPENING: return "opening"; | |
341 | case CEPH_MDS_SESSION_OPEN: return "open"; | |
342 | case CEPH_MDS_SESSION_HUNG: return "hung"; | |
343 | case CEPH_MDS_SESSION_CLOSING: return "closing"; | |
44ca18f2 | 344 | case CEPH_MDS_SESSION_RESTARTING: return "restarting"; |
2f2dc053 SW |
345 | case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; |
346 | default: return "???"; | |
347 | } | |
348 | } | |
349 | ||
350 | static struct ceph_mds_session *get_session(struct ceph_mds_session *s) | |
351 | { | |
352 | if (atomic_inc_not_zero(&s->s_ref)) { | |
353 | dout("mdsc get_session %p %d -> %d\n", s, | |
354 | atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref)); | |
355 | return s; | |
356 | } else { | |
357 | dout("mdsc get_session %p 0 -- FAIL", s); | |
358 | return NULL; | |
359 | } | |
360 | } | |
361 | ||
362 | void ceph_put_mds_session(struct ceph_mds_session *s) | |
363 | { | |
364 | dout("mdsc put_session %p %d -> %d\n", s, | |
365 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); | |
4e7a5dcd | 366 | if (atomic_dec_and_test(&s->s_ref)) { |
6c4a1915 | 367 | if (s->s_auth.authorizer) |
27859f97 SW |
368 | ceph_auth_destroy_authorizer( |
369 | s->s_mdsc->fsc->client->monc.auth, | |
370 | s->s_auth.authorizer); | |
2f2dc053 | 371 | kfree(s); |
4e7a5dcd | 372 | } |
2f2dc053 SW |
373 | } |
374 | ||
375 | /* | |
376 | * called under mdsc->mutex | |
377 | */ | |
378 | struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, | |
379 | int mds) | |
380 | { | |
381 | struct ceph_mds_session *session; | |
382 | ||
383 | if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) | |
384 | return NULL; | |
385 | session = mdsc->sessions[mds]; | |
386 | dout("lookup_mds_session %p %d\n", session, | |
387 | atomic_read(&session->s_ref)); | |
388 | get_session(session); | |
389 | return session; | |
390 | } | |
391 | ||
392 | static bool __have_session(struct ceph_mds_client *mdsc, int mds) | |
393 | { | |
394 | if (mds >= mdsc->max_sessions) | |
395 | return false; | |
396 | return mdsc->sessions[mds]; | |
397 | } | |
398 | ||
2600d2dd SW |
399 | static int __verify_registered_session(struct ceph_mds_client *mdsc, |
400 | struct ceph_mds_session *s) | |
401 | { | |
402 | if (s->s_mds >= mdsc->max_sessions || | |
403 | mdsc->sessions[s->s_mds] != s) | |
404 | return -ENOENT; | |
405 | return 0; | |
406 | } | |
407 | ||
2f2dc053 SW |
408 | /* |
409 | * create+register a new session for given mds. | |
410 | * called under mdsc->mutex. | |
411 | */ | |
412 | static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |
413 | int mds) | |
414 | { | |
415 | struct ceph_mds_session *s; | |
416 | ||
417 | s = kzalloc(sizeof(*s), GFP_NOFS); | |
4736b009 DC |
418 | if (!s) |
419 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
420 | s->s_mdsc = mdsc; |
421 | s->s_mds = mds; | |
422 | s->s_state = CEPH_MDS_SESSION_NEW; | |
423 | s->s_ttl = 0; | |
424 | s->s_seq = 0; | |
425 | mutex_init(&s->s_mutex); | |
426 | ||
b7a9e5dd | 427 | ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); |
2f2dc053 | 428 | |
d8fb02ab | 429 | spin_lock_init(&s->s_gen_ttl_lock); |
2f2dc053 | 430 | s->s_cap_gen = 0; |
1ce208a6 | 431 | s->s_cap_ttl = jiffies - 1; |
d8fb02ab AE |
432 | |
433 | spin_lock_init(&s->s_cap_lock); | |
2f2dc053 SW |
434 | s->s_renew_requested = 0; |
435 | s->s_renew_seq = 0; | |
436 | INIT_LIST_HEAD(&s->s_caps); | |
437 | s->s_nr_caps = 0; | |
5dacf091 | 438 | s->s_trim_caps = 0; |
2f2dc053 SW |
439 | atomic_set(&s->s_ref, 1); |
440 | INIT_LIST_HEAD(&s->s_waiting); | |
441 | INIT_LIST_HEAD(&s->s_unsafe); | |
442 | s->s_num_cap_releases = 0; | |
7c1332b8 | 443 | s->s_cap_iterator = NULL; |
2f2dc053 SW |
444 | INIT_LIST_HEAD(&s->s_cap_releases); |
445 | INIT_LIST_HEAD(&s->s_cap_releases_done); | |
446 | INIT_LIST_HEAD(&s->s_cap_flushing); | |
447 | INIT_LIST_HEAD(&s->s_cap_snaps_flushing); | |
448 | ||
449 | dout("register_session mds%d\n", mds); | |
450 | if (mds >= mdsc->max_sessions) { | |
451 | int newmax = 1 << get_count_order(mds+1); | |
452 | struct ceph_mds_session **sa; | |
453 | ||
454 | dout("register_session realloc to %d\n", newmax); | |
455 | sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); | |
456 | if (sa == NULL) | |
42ce56e5 | 457 | goto fail_realloc; |
2f2dc053 SW |
458 | if (mdsc->sessions) { |
459 | memcpy(sa, mdsc->sessions, | |
460 | mdsc->max_sessions * sizeof(void *)); | |
461 | kfree(mdsc->sessions); | |
462 | } | |
463 | mdsc->sessions = sa; | |
464 | mdsc->max_sessions = newmax; | |
465 | } | |
466 | mdsc->sessions[mds] = s; | |
467 | atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ | |
42ce56e5 | 468 | |
b7a9e5dd SW |
469 | ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, |
470 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
42ce56e5 | 471 | |
2f2dc053 | 472 | return s; |
42ce56e5 SW |
473 | |
474 | fail_realloc: | |
475 | kfree(s); | |
476 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
477 | } |
478 | ||
479 | /* | |
480 | * called under mdsc->mutex | |
481 | */ | |
2600d2dd | 482 | static void __unregister_session(struct ceph_mds_client *mdsc, |
42ce56e5 | 483 | struct ceph_mds_session *s) |
2f2dc053 | 484 | { |
2600d2dd SW |
485 | dout("__unregister_session mds%d %p\n", s->s_mds, s); |
486 | BUG_ON(mdsc->sessions[s->s_mds] != s); | |
42ce56e5 SW |
487 | mdsc->sessions[s->s_mds] = NULL; |
488 | ceph_con_close(&s->s_con); | |
489 | ceph_put_mds_session(s); | |
2f2dc053 SW |
490 | } |
491 | ||
492 | /* | |
493 | * drop session refs in request. | |
494 | * | |
495 | * should be last request ref, or hold mdsc->mutex | |
496 | */ | |
497 | static void put_request_session(struct ceph_mds_request *req) | |
498 | { | |
499 | if (req->r_session) { | |
500 | ceph_put_mds_session(req->r_session); | |
501 | req->r_session = NULL; | |
502 | } | |
503 | } | |
504 | ||
153c8e6b | 505 | void ceph_mdsc_release_request(struct kref *kref) |
2f2dc053 | 506 | { |
153c8e6b SW |
507 | struct ceph_mds_request *req = container_of(kref, |
508 | struct ceph_mds_request, | |
509 | r_kref); | |
510 | if (req->r_request) | |
511 | ceph_msg_put(req->r_request); | |
512 | if (req->r_reply) { | |
513 | ceph_msg_put(req->r_reply); | |
514 | destroy_reply_info(&req->r_reply_info); | |
515 | } | |
516 | if (req->r_inode) { | |
41b02e1f | 517 | ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); |
153c8e6b SW |
518 | iput(req->r_inode); |
519 | } | |
520 | if (req->r_locked_dir) | |
41b02e1f | 521 | ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); |
153c8e6b SW |
522 | if (req->r_target_inode) |
523 | iput(req->r_target_inode); | |
524 | if (req->r_dentry) | |
525 | dput(req->r_dentry); | |
526 | if (req->r_old_dentry) { | |
41b02e1f SW |
527 | /* |
528 | * track (and drop pins for) r_old_dentry_dir | |
529 | * separately, since r_old_dentry's d_parent may have | |
530 | * changed between the dir mutex being dropped and | |
531 | * this request being freed. | |
532 | */ | |
533 | ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), | |
534 | CEPH_CAP_PIN); | |
153c8e6b | 535 | dput(req->r_old_dentry); |
41b02e1f | 536 | iput(req->r_old_dentry_dir); |
2f2dc053 | 537 | } |
153c8e6b SW |
538 | kfree(req->r_path1); |
539 | kfree(req->r_path2); | |
540 | put_request_session(req); | |
37151668 | 541 | ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); |
153c8e6b | 542 | kfree(req); |
2f2dc053 SW |
543 | } |
544 | ||
545 | /* | |
546 | * lookup request, bump ref if found. |
547 | * | |
548 | * called under mdsc->mutex. | |
549 | */ | |
550 | static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, | |
551 | u64 tid) | |
552 | { | |
553 | struct ceph_mds_request *req; | |
44ca18f2 SW |
554 | struct rb_node *n = mdsc->request_tree.rb_node; |
555 | ||
556 | while (n) { | |
557 | req = rb_entry(n, struct ceph_mds_request, r_node); | |
558 | if (tid < req->r_tid) | |
559 | n = n->rb_left; | |
560 | else if (tid > req->r_tid) | |
561 | n = n->rb_right; | |
562 | else { | |
563 | ceph_mdsc_get_request(req); | |
564 | return req; | |
565 | } | |
566 | } | |
567 | return NULL; | |
568 | } | |
569 | ||
570 | static void __insert_request(struct ceph_mds_client *mdsc, | |
571 | struct ceph_mds_request *new) | |
572 | { | |
573 | struct rb_node **p = &mdsc->request_tree.rb_node; | |
574 | struct rb_node *parent = NULL; | |
575 | struct ceph_mds_request *req = NULL; | |
576 | ||
577 | while (*p) { | |
578 | parent = *p; | |
579 | req = rb_entry(parent, struct ceph_mds_request, r_node); | |
580 | if (new->r_tid < req->r_tid) | |
581 | p = &(*p)->rb_left; | |
582 | else if (new->r_tid > req->r_tid) | |
583 | p = &(*p)->rb_right; | |
584 | else | |
585 | BUG(); | |
586 | } | |
587 | ||
588 | rb_link_node(&new->r_node, parent, p); | |
589 | rb_insert_color(&new->r_node, &mdsc->request_tree); | |
2f2dc053 SW |
590 | } |
591 | ||
592 | /* | |
593 | * Register an in-flight request, and assign a tid. Link to the |
594 | * directory we are modifying (if any). |
595 | * | |
596 | * Called under mdsc->mutex. | |
597 | */ | |
598 | static void __register_request(struct ceph_mds_client *mdsc, | |
599 | struct ceph_mds_request *req, | |
600 | struct inode *dir) | |
601 | { | |
602 | req->r_tid = ++mdsc->last_tid; | |
603 | if (req->r_num_caps) | |
37151668 YS |
604 | ceph_reserve_caps(mdsc, &req->r_caps_reservation, |
605 | req->r_num_caps); | |
2f2dc053 SW |
606 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
607 | ceph_mdsc_get_request(req); | |
44ca18f2 | 608 | __insert_request(mdsc, req); |
2f2dc053 | 609 | |
cb4276cc SW |
610 | req->r_uid = current_fsuid(); |
611 | req->r_gid = current_fsgid(); | |
612 | ||
2f2dc053 SW |
613 | if (dir) { |
614 | struct ceph_inode_info *ci = ceph_inode(dir); | |
615 | ||
3b663780 | 616 | ihold(dir); |
2f2dc053 SW |
617 | spin_lock(&ci->i_unsafe_lock); |
618 | req->r_unsafe_dir = dir; | |
619 | list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); | |
620 | spin_unlock(&ci->i_unsafe_lock); | |
621 | } | |
622 | } | |
623 | ||
624 | static void __unregister_request(struct ceph_mds_client *mdsc, | |
625 | struct ceph_mds_request *req) | |
626 | { | |
627 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); | |
44ca18f2 | 628 | rb_erase(&req->r_node, &mdsc->request_tree); |
80fc7314 | 629 | RB_CLEAR_NODE(&req->r_node); |
2f2dc053 SW |
630 | |
631 | if (req->r_unsafe_dir) { | |
632 | struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); | |
633 | ||
634 | spin_lock(&ci->i_unsafe_lock); | |
635 | list_del_init(&req->r_unsafe_dir_item); | |
636 | spin_unlock(&ci->i_unsafe_lock); | |
3b663780 SW |
637 | |
638 | iput(req->r_unsafe_dir); | |
639 | req->r_unsafe_dir = NULL; | |
2f2dc053 | 640 | } |
94aa8ae1 SW |
641 | |
642 | ceph_mdsc_put_request(req); | |
2f2dc053 SW |
643 | } |
644 | ||
645 | /* | |
646 | * Choose mds to send request to next. If there is a hint set in the | |
647 | * request (e.g., due to a prior forward hint from the mds), use that. | |
648 | * Otherwise, consult frag tree and/or caps to identify the | |
649 | * appropriate mds. If all else fails, choose randomly. | |
650 | * | |
651 | * Called under mdsc->mutex. | |
652 | */ | |
7fd7d101 | 653 | static struct dentry *get_nonsnap_parent(struct dentry *dentry) |
eb6bb1c5 | 654 | { |
d79698da SW |
655 | /* |
656 | * we don't need to worry about protecting the d_parent access | |
657 | * here because we never renaming inside the snapped namespace | |
658 | * except to resplice to another snapdir, and either the old or new | |
659 | * result is a valid result. | |
660 | */ | |
eb6bb1c5 SW |
661 | while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP) |
662 | dentry = dentry->d_parent; | |
663 | return dentry; | |
664 | } | |
665 | ||
2f2dc053 SW |
666 | static int __choose_mds(struct ceph_mds_client *mdsc, |
667 | struct ceph_mds_request *req) | |
668 | { | |
669 | struct inode *inode; | |
670 | struct ceph_inode_info *ci; | |
671 | struct ceph_cap *cap; | |
672 | int mode = req->r_direct_mode; | |
673 | int mds = -1; | |
674 | u32 hash = req->r_direct_hash; | |
675 | bool is_hash = req->r_direct_is_hash; | |
676 | ||
677 | /* | |
678 | * is there a specific mds we should try? ignore hint if we have | |
679 | * no session and the mds is not up (active or recovering). | |
680 | */ | |
681 | if (req->r_resend_mds >= 0 && | |
682 | (__have_session(mdsc, req->r_resend_mds) || | |
683 | ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { | |
684 | dout("choose_mds using resend_mds mds%d\n", | |
685 | req->r_resend_mds); | |
686 | return req->r_resend_mds; | |
687 | } | |
688 | ||
689 | if (mode == USE_RANDOM_MDS) | |
690 | goto random; | |
691 | ||
692 | inode = NULL; | |
693 | if (req->r_inode) { | |
694 | inode = req->r_inode; | |
695 | } else if (req->r_dentry) { | |
d79698da SW |
696 | /* ignore race with rename; old or new d_parent is okay */ |
697 | struct dentry *parent = req->r_dentry->d_parent; | |
698 | struct inode *dir = parent->d_inode; | |
eb6bb1c5 | 699 | |
3d14c5d2 | 700 | if (dir->i_sb != mdsc->fsc->sb) { |
eb6bb1c5 SW |
701 | /* not this fs! */ |
702 | inode = req->r_dentry->d_inode; | |
703 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { | |
704 | /* direct snapped/virtual snapdir requests | |
705 | * based on parent dir inode */ | |
d79698da | 706 | struct dentry *dn = get_nonsnap_parent(parent); |
eb6bb1c5 SW |
707 | inode = dn->d_inode; |
708 | dout("__choose_mds using nonsnap parent %p\n", inode); | |
709 | } else if (req->r_dentry->d_inode) { | |
710 | /* dentry target */ | |
2f2dc053 SW |
711 | inode = req->r_dentry->d_inode; |
712 | } else { | |
eb6bb1c5 SW |
713 | /* dir + name */ |
714 | inode = dir; | |
e5f86dc3 | 715 | hash = ceph_dentry_hash(dir, req->r_dentry); |
2f2dc053 SW |
716 | is_hash = true; |
717 | } | |
718 | } | |
eb6bb1c5 | 719 | |
2f2dc053 SW |
720 | dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, |
721 | (int)hash, mode); | |
722 | if (!inode) | |
723 | goto random; | |
724 | ci = ceph_inode(inode); | |
725 | ||
726 | if (is_hash && S_ISDIR(inode->i_mode)) { | |
727 | struct ceph_inode_frag frag; | |
728 | int found; | |
729 | ||
730 | ceph_choose_frag(ci, hash, &frag, &found); | |
731 | if (found) { | |
732 | if (mode == USE_ANY_MDS && frag.ndist > 0) { | |
733 | u8 r; | |
734 | ||
735 | /* choose a random replica */ | |
736 | get_random_bytes(&r, 1); | |
737 | r %= frag.ndist; | |
738 | mds = frag.dist[r]; | |
739 | dout("choose_mds %p %llx.%llx " | |
740 | "frag %u mds%d (%d/%d)\n", | |
741 | inode, ceph_vinop(inode), | |
d66bbd44 | 742 | frag.frag, mds, |
2f2dc053 | 743 | (int)r, frag.ndist); |
d66bbd44 SW |
744 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
745 | CEPH_MDS_STATE_ACTIVE) | |
746 | return mds; | |
2f2dc053 SW |
747 | } |
748 | ||
749 | /* since this file/dir wasn't known to be | |
750 | * replicated, then we want to look for the | |
751 | * authoritative mds. */ | |
752 | mode = USE_AUTH_MDS; | |
753 | if (frag.mds >= 0) { | |
754 | /* choose auth mds */ | |
755 | mds = frag.mds; | |
756 | dout("choose_mds %p %llx.%llx " | |
757 | "frag %u mds%d (auth)\n", | |
758 | inode, ceph_vinop(inode), frag.frag, mds); | |
d66bbd44 SW |
759 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
760 | CEPH_MDS_STATE_ACTIVE) | |
761 | return mds; | |
2f2dc053 SW |
762 | } |
763 | } | |
764 | } | |
765 | ||
be655596 | 766 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
767 | cap = NULL; |
768 | if (mode == USE_AUTH_MDS) | |
769 | cap = ci->i_auth_cap; | |
770 | if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) | |
771 | cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); | |
772 | if (!cap) { | |
be655596 | 773 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
774 | goto random; |
775 | } | |
776 | mds = cap->session->s_mds; | |
777 | dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", | |
778 | inode, ceph_vinop(inode), mds, | |
779 | cap == ci->i_auth_cap ? "auth " : "", cap); | |
be655596 | 780 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
781 | return mds; |
782 | ||
783 | random: | |
784 | mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); | |
785 | dout("choose_mds chose random mds%d\n", mds); | |
786 | return mds; | |
787 | } | |
788 | ||
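In summary, the selection order implemented by __choose_mds() above is:

```c
/*
 * 1. req->r_resend_mds, if set and usable (we have a session, or that
 *    mds is up);
 * 2. for a hashed name in a directory: a random replica (USE_ANY_MDS)
 *    or the auth mds from the frag tree, if that mds is active;
 * 3. the mds holding a cap (preferably the auth cap) on the inode;
 * 4. a random mds from the mdsmap, as a last resort.
 */
```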
789 | ||
790 | /* | |
791 | * session messages | |
792 | */ | |
793 | static struct ceph_msg *create_session_msg(u32 op, u64 seq) | |
794 | { | |
795 | struct ceph_msg *msg; | |
796 | struct ceph_mds_session_head *h; | |
797 | ||
b61c2763 SW |
798 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, |
799 | false); | |
a79832f2 | 800 | if (!msg) { |
2f2dc053 | 801 | pr_err("create_session_msg ENOMEM creating msg\n"); |
a79832f2 | 802 | return NULL; |
2f2dc053 SW |
803 | } |
804 | h = msg->front.iov_base; | |
805 | h->op = cpu_to_le32(op); | |
806 | h->seq = cpu_to_le64(seq); | |
807 | return msg; | |
808 | } | |
809 | ||
810 | /* | |
811 | * send session open request. | |
812 | * | |
813 | * called under mdsc->mutex | |
814 | */ | |
815 | static int __open_session(struct ceph_mds_client *mdsc, | |
816 | struct ceph_mds_session *session) | |
817 | { | |
818 | struct ceph_msg *msg; | |
819 | int mstate; | |
820 | int mds = session->s_mds; | |
2f2dc053 SW |
821 | |
822 | /* wait for mds to go active? */ | |
823 | mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); | |
824 | dout("open_session to mds%d (%s)\n", mds, | |
825 | ceph_mds_state_name(mstate)); | |
826 | session->s_state = CEPH_MDS_SESSION_OPENING; | |
827 | session->s_renew_requested = jiffies; | |
828 | ||
829 | /* send connect message */ | |
830 | msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq); | |
a79832f2 SW |
831 | if (!msg) |
832 | return -ENOMEM; | |
2f2dc053 | 833 | ceph_con_send(&session->s_con, msg); |
2f2dc053 SW |
834 | return 0; |
835 | } | |
836 | ||
ed0552a1 SW |
837 | /* |
838 | * open sessions for any export targets for the given mds | |
839 | * | |
840 | * called under mdsc->mutex | |
841 | */ | |
842 | static void __open_export_target_sessions(struct ceph_mds_client *mdsc, | |
843 | struct ceph_mds_session *session) | |
844 | { | |
845 | struct ceph_mds_info *mi; | |
846 | struct ceph_mds_session *ts; | |
847 | int i, mds = session->s_mds; | |
848 | int target; | |
849 | ||
850 | if (mds >= mdsc->mdsmap->m_max_mds) | |
851 | return; | |
852 | mi = &mdsc->mdsmap->m_info[mds]; | |
853 | dout("open_export_target_sessions for mds%d (%d targets)\n", | |
854 | session->s_mds, mi->num_export_targets); | |
855 | ||
856 | for (i = 0; i < mi->num_export_targets; i++) { | |
857 | target = mi->export_targets[i]; | |
858 | ts = __ceph_lookup_mds_session(mdsc, target); | |
859 | if (!ts) { | |
860 | ts = register_session(mdsc, target); | |
861 | if (IS_ERR(ts)) | |
862 | return; | |
863 | } | |
864 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
865 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
866 | __open_session(mdsc, session); | |
867 | else | |
868 | dout(" mds%d target mds%d %p is %s\n", session->s_mds, | |
869 | i, ts, session_state_name(ts->s_state)); | |
870 | ceph_put_mds_session(ts); | |
871 | } | |
872 | } | |
873 | ||
154f42c2 SW |
874 | void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, |
875 | struct ceph_mds_session *session) | |
876 | { | |
877 | mutex_lock(&mdsc->mutex); | |
878 | __open_export_target_sessions(mdsc, session); | |
879 | mutex_unlock(&mdsc->mutex); | |
880 | } | |
881 | ||
2f2dc053 SW |
882 | /* |
883 | * session caps | |
884 | */ | |
885 | ||
886 | /* | |
887 | * Free preallocated cap messages assigned to this session | |
888 | */ | |
889 | static void cleanup_cap_releases(struct ceph_mds_session *session) | |
890 | { | |
891 | struct ceph_msg *msg; | |
892 | ||
893 | spin_lock(&session->s_cap_lock); | |
894 | while (!list_empty(&session->s_cap_releases)) { | |
895 | msg = list_first_entry(&session->s_cap_releases, | |
896 | struct ceph_msg, list_head); | |
897 | list_del_init(&msg->list_head); | |
898 | ceph_msg_put(msg); | |
899 | } | |
900 | while (!list_empty(&session->s_cap_releases_done)) { | |
901 | msg = list_first_entry(&session->s_cap_releases_done, | |
902 | struct ceph_msg, list_head); | |
903 | list_del_init(&msg->list_head); | |
904 | ceph_msg_put(msg); | |
905 | } | |
906 | spin_unlock(&session->s_cap_lock); | |
907 | } | |
908 | ||
909 | /* | |
f818a736 SW |
910 | * Helper to safely iterate over all caps associated with a session, with |
911 | * special care taken to handle a racing __ceph_remove_cap(). | |
2f2dc053 | 912 | * |
f818a736 | 913 | * Caller must hold session s_mutex. |
2f2dc053 SW |
914 | */ |
915 | static int iterate_session_caps(struct ceph_mds_session *session, | |
916 | int (*cb)(struct inode *, struct ceph_cap *, | |
917 | void *), void *arg) | |
918 | { | |
7c1332b8 SW |
919 | struct list_head *p; |
920 | struct ceph_cap *cap; | |
921 | struct inode *inode, *last_inode = NULL; | |
922 | struct ceph_cap *old_cap = NULL; | |
2f2dc053 SW |
923 | int ret; |
924 | ||
925 | dout("iterate_session_caps %p mds%d\n", session, session->s_mds); | |
926 | spin_lock(&session->s_cap_lock); | |
7c1332b8 SW |
927 | p = session->s_caps.next; |
928 | while (p != &session->s_caps) { | |
929 | cap = list_entry(p, struct ceph_cap, session_caps); | |
2f2dc053 | 930 | inode = igrab(&cap->ci->vfs_inode); |
7c1332b8 SW |
931 | if (!inode) { |
932 | p = p->next; | |
2f2dc053 | 933 | continue; |
7c1332b8 SW |
934 | } |
935 | session->s_cap_iterator = cap; | |
2f2dc053 | 936 | spin_unlock(&session->s_cap_lock); |
7c1332b8 SW |
937 | |
938 | if (last_inode) { | |
939 | iput(last_inode); | |
940 | last_inode = NULL; | |
941 | } | |
942 | if (old_cap) { | |
37151668 | 943 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 SW |
944 | old_cap = NULL; |
945 | } | |
946 | ||
2f2dc053 | 947 | ret = cb(inode, cap, arg); |
7c1332b8 SW |
948 | last_inode = inode; |
949 | ||
2f2dc053 | 950 | spin_lock(&session->s_cap_lock); |
7c1332b8 SW |
951 | p = p->next; |
952 | if (cap->ci == NULL) { | |
953 | dout("iterate_session_caps finishing cap %p removal\n", | |
954 | cap); | |
955 | BUG_ON(cap->session != session); | |
956 | list_del_init(&cap->session_caps); | |
957 | session->s_nr_caps--; | |
958 | cap->session = NULL; | |
959 | old_cap = cap; /* put_cap it w/o locks held */ | |
960 | } | |
5dacf091 SW |
961 | if (ret < 0) |
962 | goto out; | |
2f2dc053 | 963 | } |
5dacf091 SW |
964 | ret = 0; |
965 | out: | |
7c1332b8 | 966 | session->s_cap_iterator = NULL; |
2f2dc053 | 967 | spin_unlock(&session->s_cap_lock); |
7c1332b8 SW |
968 | |
969 | if (last_inode) | |
970 | iput(last_inode); | |
971 | if (old_cap) | |
37151668 | 972 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 | 973 | |
5dacf091 | 974 | return ret; |
2f2dc053 SW |
975 | } |
976 | ||
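Any function matching the callback signature above can be passed to iterate_session_caps(). A minimal hypothetical example (count_caps_cb and its counter are illustrative only, not part of this file):

```c
/* Return 0 to keep walking; a negative value stops the iteration
 * (see the ret < 0 check above). Called without s_cap_lock held. */
static int count_caps_cb(struct inode *inode, struct ceph_cap *cap,
			 void *arg)
{
	int *count = arg;

	(*count)++;
	return 0;
}

/*
 * usage, with session->s_mutex held:
 *	int n = 0;
 *	iterate_session_caps(session, count_caps_cb, &n);
 */
```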
977 | static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | |
6c99f254 | 978 | void *arg) |
2f2dc053 SW |
979 | { |
980 | struct ceph_inode_info *ci = ceph_inode(inode); | |
6c99f254 SW |
981 | int drop = 0; |
982 | ||
2f2dc053 SW |
983 | dout("removing cap %p, ci is %p, inode is %p\n", |
984 | cap, ci, &ci->vfs_inode); | |
be655596 | 985 | spin_lock(&ci->i_ceph_lock); |
6c99f254 SW |
986 | __ceph_remove_cap(cap); |
987 | if (!__ceph_is_any_real_caps(ci)) { | |
988 | struct ceph_mds_client *mdsc = | |
3d14c5d2 | 989 | ceph_sb_to_client(inode->i_sb)->mdsc; |
6c99f254 SW |
990 | |
991 | spin_lock(&mdsc->cap_dirty_lock); | |
992 | if (!list_empty(&ci->i_dirty_item)) { | |
993 | pr_info(" dropping dirty %s state for %p %lld\n", | |
994 | ceph_cap_string(ci->i_dirty_caps), | |
995 | inode, ceph_ino(inode)); | |
996 | ci->i_dirty_caps = 0; | |
997 | list_del_init(&ci->i_dirty_item); | |
998 | drop = 1; | |
999 | } | |
1000 | if (!list_empty(&ci->i_flushing_item)) { | |
1001 | pr_info(" dropping dirty+flushing %s state for %p %lld\n", | |
1002 | ceph_cap_string(ci->i_flushing_caps), | |
1003 | inode, ceph_ino(inode)); | |
1004 | ci->i_flushing_caps = 0; | |
1005 | list_del_init(&ci->i_flushing_item); | |
1006 | mdsc->num_cap_flushing--; | |
1007 | drop = 1; | |
1008 | } | |
1009 | if (drop && ci->i_wrbuffer_ref) { | |
1010 | pr_info(" dropping dirty data for %p %lld\n", | |
1011 | inode, ceph_ino(inode)); | |
1012 | ci->i_wrbuffer_ref = 0; | |
1013 | ci->i_wrbuffer_ref_head = 0; | |
1014 | drop++; | |
1015 | } | |
1016 | spin_unlock(&mdsc->cap_dirty_lock); | |
1017 | } | |
be655596 | 1018 | spin_unlock(&ci->i_ceph_lock); |
6c99f254 SW |
1019 | while (drop--) |
1020 | iput(inode); | |
2f2dc053 SW |
1021 | return 0; |
1022 | } | |
1023 | ||
1024 | /* | |
1025 | * caller must hold session s_mutex | |
1026 | */ | |
1027 | static void remove_session_caps(struct ceph_mds_session *session) | |
1028 | { | |
1029 | dout("remove_session_caps on %p\n", session); | |
1030 | iterate_session_caps(session, remove_session_caps_cb, NULL); | |
1031 | BUG_ON(session->s_nr_caps > 0); | |
6c99f254 | 1032 | BUG_ON(!list_empty(&session->s_cap_flushing)); |
2f2dc053 SW |
1033 | cleanup_cap_releases(session); |
1034 | } | |
1035 | ||
1036 | /* | |
1037 | * wake up any threads waiting on this session's caps. if the cap is | |
1038 | * old (didn't get renewed on the client reconnect), remove it now. | |
1039 | * | |
1040 | * caller must hold s_mutex. | |
1041 | */ | |
1042 | static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, | |
1043 | void *arg) | |
1044 | { | |
0dc2570f SW |
1045 | struct ceph_inode_info *ci = ceph_inode(inode); |
1046 | ||
03066f23 | 1047 | wake_up_all(&ci->i_cap_wq); |
0dc2570f | 1048 | if (arg) { |
be655596 | 1049 | spin_lock(&ci->i_ceph_lock); |
0dc2570f SW |
1050 | ci->i_wanted_max_size = 0; |
1051 | ci->i_requested_max_size = 0; | |
be655596 | 1052 | spin_unlock(&ci->i_ceph_lock); |
0dc2570f | 1053 | } |
2f2dc053 SW |
1054 | return 0; |
1055 | } | |
1056 | ||
0dc2570f SW |
1057 | static void wake_up_session_caps(struct ceph_mds_session *session, |
1058 | int reconnect) | |
2f2dc053 SW |
1059 | { |
1060 | dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); | |
0dc2570f SW |
1061 | iterate_session_caps(session, wake_up_session_cb, |
1062 | (void *)(unsigned long)reconnect); | |
2f2dc053 SW |
1063 | } |
1064 | ||
1065 | /* | |
1066 | * Send periodic message to MDS renewing all currently held caps. The | |
1067 | * ack will reset the expiration for all caps from this session. | |
1068 | * | |
1069 | * caller holds s_mutex | |
1070 | */ | |
1071 | static int send_renew_caps(struct ceph_mds_client *mdsc, | |
1072 | struct ceph_mds_session *session) | |
1073 | { | |
1074 | struct ceph_msg *msg; | |
1075 | int state; | |
1076 | ||
1077 | if (time_after_eq(jiffies, session->s_cap_ttl) && | |
1078 | time_after_eq(session->s_cap_ttl, session->s_renew_requested)) | |
1079 | pr_info("mds%d caps stale\n", session->s_mds); | |
e4cb4cb8 | 1080 | session->s_renew_requested = jiffies; |
2f2dc053 SW |
1081 | |
1082 | /* do not try to renew caps until a recovering mds has reconnected | |
1083 | * with its clients. */ | |
1084 | state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); | |
1085 | if (state < CEPH_MDS_STATE_RECONNECT) { | |
1086 | dout("send_renew_caps ignoring mds%d (%s)\n", | |
1087 | session->s_mds, ceph_mds_state_name(state)); | |
1088 | return 0; | |
1089 | } | |
1090 | ||
1091 | dout("send_renew_caps to mds%d (%s)\n", session->s_mds, | |
1092 | ceph_mds_state_name(state)); | |
2f2dc053 SW |
1093 | msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, |
1094 | ++session->s_renew_seq); | |
a79832f2 SW |
1095 | if (!msg) |
1096 | return -ENOMEM; | |
2f2dc053 SW |
1097 | ceph_con_send(&session->s_con, msg); |
1098 | return 0; | |
1099 | } | |
1100 | ||
1101 | /* | |
1102 | * Note new cap ttl, and any transition from stale -> not stale (fresh?). | |
0dc2570f SW |
1103 | * |
1104 | * Called under session->s_mutex | |
2f2dc053 SW |
1105 | */ |
1106 | static void renewed_caps(struct ceph_mds_client *mdsc, | |
1107 | struct ceph_mds_session *session, int is_renew) | |
1108 | { | |
1109 | int was_stale; | |
1110 | int wake = 0; | |
1111 | ||
1112 | spin_lock(&session->s_cap_lock); | |
1ce208a6 | 1113 | was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl); |
2f2dc053 SW |
1114 | |
1115 | session->s_cap_ttl = session->s_renew_requested + | |
1116 | mdsc->mdsmap->m_session_timeout*HZ; | |
1117 | ||
1118 | if (was_stale) { | |
1119 | if (time_before(jiffies, session->s_cap_ttl)) { | |
1120 | pr_info("mds%d caps renewed\n", session->s_mds); | |
1121 | wake = 1; | |
1122 | } else { | |
1123 | pr_info("mds%d caps still stale\n", session->s_mds); | |
1124 | } | |
1125 | } | |
1126 | dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", | |
1127 | session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", | |
1128 | time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); | |
1129 | spin_unlock(&session->s_cap_lock); | |
1130 | ||
1131 | if (wake) | |
0dc2570f | 1132 | wake_up_session_caps(session, 0); |
2f2dc053 SW |
1133 | } |
1134 | ||
1135 | /* | |
1136 | * send a session close request | |
1137 | */ | |
1138 | static int request_close_session(struct ceph_mds_client *mdsc, | |
1139 | struct ceph_mds_session *session) | |
1140 | { | |
1141 | struct ceph_msg *msg; | |
2f2dc053 SW |
1142 | |
1143 | dout("request_close_session mds%d state %s seq %lld\n", | |
1144 | session->s_mds, session_state_name(session->s_state), | |
1145 | session->s_seq); | |
1146 | msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); | |
a79832f2 SW |
1147 | if (!msg) |
1148 | return -ENOMEM; | |
1149 | ceph_con_send(&session->s_con, msg); | |
1150 | return 0; | |
2f2dc053 SW |
1151 | } |
1152 | ||
1153 | /* | |
1154 | * Called with s_mutex held. | |
1155 | */ | |
1156 | static int __close_session(struct ceph_mds_client *mdsc, | |
1157 | struct ceph_mds_session *session) | |
1158 | { | |
1159 | if (session->s_state >= CEPH_MDS_SESSION_CLOSING) | |
1160 | return 0; | |
1161 | session->s_state = CEPH_MDS_SESSION_CLOSING; | |
1162 | return request_close_session(mdsc, session); | |
1163 | } | |
1164 | ||
1165 | /* | |
1166 | * Trim old(er) caps. | |
1167 | * | |
1168 | * Because we can't cache an inode without one or more caps, we do | |
1169 | * this indirectly: if a cap is unused, we prune its aliases, at which | |
1170 | * point the inode will hopefully get dropped too. |
1171 | * | |
1172 | * Yes, this is a bit sloppy. Our only real goal here is to respond to | |
1173 | * memory pressure from the MDS, though, so it needn't be perfect. | |
1174 | */ | |
1175 | static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) | |
1176 | { | |
1177 | struct ceph_mds_session *session = arg; | |
1178 | struct ceph_inode_info *ci = ceph_inode(inode); | |
1179 | int used, oissued, mine; | |
1180 | ||
1181 | if (session->s_trim_caps <= 0) | |
1182 | return -1; | |
1183 | ||
be655596 | 1184 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
1185 | mine = cap->issued | cap->implemented; |
1186 | used = __ceph_caps_used(ci); | |
1187 | oissued = __ceph_caps_issued_other(ci, cap); | |
1188 | ||
1189 | dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n", | |
1190 | inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), | |
1191 | ceph_cap_string(used)); | |
1192 | if (ci->i_dirty_caps) | |
1193 | goto out; /* dirty caps */ | |
1194 | if ((used & ~oissued) & mine) | |
1195 | goto out; /* we need these caps */ | |
1196 | ||
1197 | session->s_trim_caps--; | |
1198 | if (oissued) { | |
1199 | /* we aren't the only cap.. just remove us */ | |
d40ee0dc YZ |
1200 | __queue_cap_release(session, ceph_ino(inode), cap->cap_id, |
1201 | cap->mseq, cap->issue_seq); | |
7c1332b8 | 1202 | __ceph_remove_cap(cap); |
2f2dc053 SW |
1203 | } else { |
1204 | /* try to drop referring dentries */ | |
be655596 | 1205 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1206 | d_prune_aliases(inode); |
1207 | dout("trim_caps_cb %p cap %p pruned, count now %d\n", | |
1208 | inode, cap, atomic_read(&inode->i_count)); | |
1209 | return 0; | |
1210 | } | |
1211 | ||
1212 | out: | |
be655596 | 1213 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1214 | return 0; |
1215 | } | |
1216 | ||
1217 | /* | |
1218 | * Trim session cap count down to some max number. | |
1219 | */ | |
1220 | static int trim_caps(struct ceph_mds_client *mdsc, | |
1221 | struct ceph_mds_session *session, | |
1222 | int max_caps) | |
1223 | { | |
1224 | int trim_caps = session->s_nr_caps - max_caps; | |
1225 | ||
1226 | dout("trim_caps mds%d start: %d / %d, trim %d\n", | |
1227 | session->s_mds, session->s_nr_caps, max_caps, trim_caps); | |
1228 | if (trim_caps > 0) { | |
1229 | session->s_trim_caps = trim_caps; | |
1230 | iterate_session_caps(session, trim_caps_cb, session); | |
1231 | dout("trim_caps mds%d done: %d / %d, trimmed %d\n", | |
1232 | session->s_mds, session->s_nr_caps, max_caps, | |
1233 | trim_caps - session->s_trim_caps); | |
5dacf091 | 1234 | session->s_trim_caps = 0; |
2f2dc053 SW |
1235 | } |
1236 | return 0; | |
1237 | } | |
1238 | ||
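A quick worked example of the arithmetic above, as a sketch:

```c
/*
 * With session->s_nr_caps == 5000 and max_caps == 4000 from the MDS,
 * trim_caps == 1000. trim_caps_cb() decrements s_trim_caps for each
 * cap it manages to drop and returns -1 once it reaches zero, which
 * stops iterate_session_caps() early.
 */
```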
1239 | /* | |
1240 | * Allocate cap_release messages. If there is a partially full message | |
1241 | * in the queue, try to allocate enough to cover its remainder, so that |
1242 | * we can send it immediately. |
1243 | * | |
1244 | * Called under s_mutex. | |
1245 | */ | |
2b2300d6 | 1246 | int ceph_add_cap_releases(struct ceph_mds_client *mdsc, |
ee6b272b | 1247 | struct ceph_mds_session *session) |
2f2dc053 | 1248 | { |
38e8883e | 1249 | struct ceph_msg *msg, *partial = NULL; |
2f2dc053 SW |
1250 | struct ceph_mds_cap_release *head; |
1251 | int err = -ENOMEM; | |
3d14c5d2 | 1252 | int extra = mdsc->fsc->mount_options->cap_release_safety; |
38e8883e | 1253 | int num; |
2f2dc053 | 1254 | |
38e8883e SW |
1255 | dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, |
1256 | extra); | |
2f2dc053 SW |
1257 | |
1258 | spin_lock(&session->s_cap_lock); | |
1259 | ||
1260 | if (!list_empty(&session->s_cap_releases)) { | |
1261 | msg = list_first_entry(&session->s_cap_releases, | |
1262 | struct ceph_msg, | |
1263 | list_head); | |
1264 | head = msg->front.iov_base; | |
38e8883e SW |
1265 | num = le32_to_cpu(head->num); |
1266 | if (num) { | |
1267 | dout(" partial %p with (%d/%d)\n", msg, num, | |
1268 | (int)CEPH_CAPS_PER_RELEASE); | |
1269 | extra += CEPH_CAPS_PER_RELEASE - num; | |
1270 | partial = msg; | |
1271 | } | |
2f2dc053 | 1272 | } |
2f2dc053 SW |
1273 | while (session->s_num_cap_releases < session->s_nr_caps + extra) { |
1274 | spin_unlock(&session->s_cap_lock); | |
34d23762 | 1275 | msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE, |
b61c2763 | 1276 | GFP_NOFS, false); |
2f2dc053 SW |
1277 | if (!msg) |
1278 | goto out_unlocked; | |
1279 | dout("add_cap_releases %p msg %p now %d\n", session, msg, | |
1280 | (int)msg->front.iov_len); | |
1281 | head = msg->front.iov_base; | |
1282 | head->num = cpu_to_le32(0); | |
1283 | msg->front.iov_len = sizeof(*head); | |
1284 | spin_lock(&session->s_cap_lock); | |
1285 | list_add(&msg->list_head, &session->s_cap_releases); | |
1286 | session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE; | |
1287 | } | |
1288 | ||
38e8883e SW |
1289 | if (partial) { |
1290 | head = partial->front.iov_base; | |
1291 | num = le32_to_cpu(head->num); | |
1292 | dout(" queueing partial %p with %d/%d\n", partial, num, | |
1293 | (int)CEPH_CAPS_PER_RELEASE); | |
1294 | list_move_tail(&partial->list_head, | |
1295 | &session->s_cap_releases_done); | |
1296 | session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num; | |
2f2dc053 SW |
1297 | } |
1298 | err = 0; | |
1299 | spin_unlock(&session->s_cap_lock); | |
1300 | out_unlocked: | |
1301 | return err; | |
1302 | } | |
1303 | ||
1304 | /* | |
1305 | * flush all dirty inode data to disk. | |
1306 | * | |
1307 | * returns true if we've flushed through want_flush_seq | |
1308 | */ | |
1309 | static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) | |
1310 | { | |
1311 | int mds, ret = 1; | |
1312 | ||
1313 | dout("check_cap_flush want %lld\n", want_flush_seq); | |
1314 | mutex_lock(&mdsc->mutex); | |
1315 | for (mds = 0; ret && mds < mdsc->max_sessions; mds++) { | |
1316 | struct ceph_mds_session *session = mdsc->sessions[mds]; | |
1317 | ||
1318 | if (!session) | |
1319 | continue; | |
1320 | get_session(session); | |
1321 | mutex_unlock(&mdsc->mutex); | |
1322 | ||
1323 | mutex_lock(&session->s_mutex); | |
1324 | if (!list_empty(&session->s_cap_flushing)) { | |
1325 | struct ceph_inode_info *ci = | |
1326 | list_entry(session->s_cap_flushing.next, | |
1327 | struct ceph_inode_info, | |
1328 | i_flushing_item); | |
1329 | struct inode *inode = &ci->vfs_inode; | |
1330 | ||
be655596 | 1331 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
1332 | if (ci->i_cap_flush_seq <= want_flush_seq) { |
1333 | dout("check_cap_flush still flushing %p " | |
1334 | "seq %lld <= %lld to mds%d\n", inode, | |
1335 | ci->i_cap_flush_seq, want_flush_seq, | |
1336 | session->s_mds); | |
1337 | ret = 0; | |
1338 | } | |
be655596 | 1339 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1340 | } |
1341 | mutex_unlock(&session->s_mutex); | |
1342 | ceph_put_mds_session(session); | |
1343 | ||
1344 | if (!ret) | |
1345 | return ret; | |
1346 | mutex_lock(&mdsc->mutex); | |
1347 | } | |
1348 | ||
1349 | mutex_unlock(&mdsc->mutex); | |
1350 | dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); | |
1351 | return ret; | |
1352 | } | |
1353 | ||
1354 | /* | |
1355 | * called under s_mutex | |
1356 | */ | |
3d7ded4d SW |
1357 | void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1358 | struct ceph_mds_session *session) | |
2f2dc053 SW |
1359 | { |
1360 | struct ceph_msg *msg; | |
1361 | ||
1362 | dout("send_cap_releases mds%d\n", session->s_mds); | |
0f8605f2 SW |
1363 | spin_lock(&session->s_cap_lock); |
1364 | while (!list_empty(&session->s_cap_releases_done)) { | |
2f2dc053 SW |
1365 | msg = list_first_entry(&session->s_cap_releases_done, |
1366 | struct ceph_msg, list_head); | |
1367 | list_del_init(&msg->list_head); | |
1368 | spin_unlock(&session->s_cap_lock); | |
1369 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1370 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1371 | ceph_con_send(&session->s_con, msg); | |
0f8605f2 | 1372 | spin_lock(&session->s_cap_lock); |
2f2dc053 SW |
1373 | } |
1374 | spin_unlock(&session->s_cap_lock); | |
1375 | } | |
1376 | ||
e01a5946 SW |
1377 | static void discard_cap_releases(struct ceph_mds_client *mdsc, |
1378 | struct ceph_mds_session *session) | |
1379 | { | |
1380 | struct ceph_msg *msg; | |
1381 | struct ceph_mds_cap_release *head; | |
1382 | unsigned num; | |
1383 | ||
1384 | dout("discard_cap_releases mds%d\n", session->s_mds); | |
1385 | spin_lock(&session->s_cap_lock); | |
1386 | ||
1387 | /* zero out the in-progress message */ | |
1388 | msg = list_first_entry(&session->s_cap_releases, | |
1389 | struct ceph_msg, list_head); | |
1390 | head = msg->front.iov_base; | |
1391 | num = le32_to_cpu(head->num); | |
1392 | dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num); | |
1393 | head->num = cpu_to_le32(0); | |
1394 | session->s_num_cap_releases += num; | |
1395 | ||
1396 | /* requeue completed messages */ | |
1397 | while (!list_empty(&session->s_cap_releases_done)) { | |
1398 | msg = list_first_entry(&session->s_cap_releases_done, | |
1399 | struct ceph_msg, list_head); | |
1400 | list_del_init(&msg->list_head); | |
1401 | ||
1402 | head = msg->front.iov_base; | |
1403 | num = le32_to_cpu(head->num); | |
1404 | dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, | |
1405 | num); | |
1406 | session->s_num_cap_releases += num; | |
1407 | head->num = cpu_to_le32(0); | |
1408 | msg->front.iov_len = sizeof(*head); | |
1409 | list_add(&msg->list_head, &session->s_cap_releases); | |
1410 | } | |
1411 | ||
1412 | spin_unlock(&session->s_cap_lock); | |
1413 | } | |
1414 | ||
2f2dc053 SW |
1415 | /* |
1416 | * requests | |
1417 | */ | |
1418 | ||
1419 | /* | |
1420 | * Create an mds request. | |
1421 | */ | |
1422 | struct ceph_mds_request * | |
1423 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |
1424 | { | |
1425 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | |
1426 | ||
1427 | if (!req) | |
1428 | return ERR_PTR(-ENOMEM); | |
1429 | ||
b4556396 | 1430 | mutex_init(&req->r_fill_mutex); |
37151668 | 1431 | req->r_mdsc = mdsc; |
2f2dc053 SW |
1432 | req->r_started = jiffies; |
1433 | req->r_resend_mds = -1; | |
1434 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | |
1435 | req->r_fmode = -1; | |
153c8e6b | 1436 | kref_init(&req->r_kref); |
2f2dc053 SW |
1437 | INIT_LIST_HEAD(&req->r_wait); |
1438 | init_completion(&req->r_completion); | |
1439 | init_completion(&req->r_safe_completion); | |
1440 | INIT_LIST_HEAD(&req->r_unsafe_item); | |
1441 | ||
1442 | req->r_op = op; | |
1443 | req->r_direct_mode = mode; | |
1444 | return req; | |
1445 | } | |
1446 | ||
1447 | /* | |
44ca18f2 | 1448 | * return the oldest (lowest-tid) request in the request tree, or NULL (tid 0) if none. |
2f2dc053 SW |
1449 | * |
1450 | * called under mdsc->mutex. | |
1451 | */ | |
44ca18f2 SW |
1452 | static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) |
1453 | { | |
1454 | if (RB_EMPTY_ROOT(&mdsc->request_tree)) | |
1455 | return NULL; | |
1456 | return rb_entry(rb_first(&mdsc->request_tree), | |
1457 | struct ceph_mds_request, r_node); | |
1458 | } | |
1459 | ||
2f2dc053 SW |
1460 | static u64 __get_oldest_tid(struct ceph_mds_client *mdsc) |
1461 | { | |
44ca18f2 SW |
1462 | struct ceph_mds_request *req = __get_oldest_req(mdsc); |
1463 | ||
1464 | if (req) | |
1465 | return req->r_tid; | |
1466 | return 0; | |
2f2dc053 SW |
1467 | } |
1468 | ||
1469 | /* | |
1470 | * Build a dentry's path. Allocate on heap; caller must kfree. Based | |
1471 | * on build_path_from_dentry in fs/cifs/dir.c. | |
1472 | * | |
1473 | * If @stop_on_nosnap, generate path relative to the first non-snapped | |
1474 | * inode. | |
1475 | * | |
1476 | * Encode hidden .snap dirs as a double /, i.e. | |
1477 | * foo/.snap/bar -> foo//bar | |
1478 | */ | |
1479 | char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | |
1480 | int stop_on_nosnap) | |
1481 | { | |
1482 | struct dentry *temp; | |
1483 | char *path; | |
1484 | int len, pos; | |
1b71fe2e | 1485 | unsigned seq; |
2f2dc053 SW |
1486 | |
1487 | if (dentry == NULL) | |
1488 | return ERR_PTR(-EINVAL); | |
1489 | ||
1490 | retry: | |
1491 | len = 0; | |
1b71fe2e AV |
1492 | seq = read_seqbegin(&rename_lock); |
1493 | rcu_read_lock(); | |
2f2dc053 SW |
1494 | for (temp = dentry; !IS_ROOT(temp);) { |
1495 | struct inode *inode = temp->d_inode; | |
1496 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) | |
1497 | len++; /* slash only */ | |
1498 | else if (stop_on_nosnap && inode && | |
1499 | ceph_snap(inode) == CEPH_NOSNAP) | |
1500 | break; | |
1501 | else | |
1502 | len += 1 + temp->d_name.len; | |
1503 | temp = temp->d_parent; | |
2f2dc053 | 1504 | } |
1b71fe2e | 1505 | rcu_read_unlock(); |
2f2dc053 SW |
1506 | if (len) |
1507 | len--; /* no leading '/' */ | |
1508 | ||
1509 | path = kmalloc(len+1, GFP_NOFS); | |
1510 | if (path == NULL) | |
1511 | return ERR_PTR(-ENOMEM); | |
1512 | pos = len; | |
1513 | path[pos] = 0; /* trailing null */ | |
1b71fe2e | 1514 | rcu_read_lock(); |
2f2dc053 | 1515 | for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { |
1b71fe2e | 1516 | struct inode *inode; |
2f2dc053 | 1517 | |
1b71fe2e AV |
1518 | spin_lock(&temp->d_lock); |
1519 | inode = temp->d_inode; | |
2f2dc053 | 1520 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { |
104648ad | 1521 | dout("build_path path+%d: %p SNAPDIR\n", |
2f2dc053 SW |
1522 | pos, temp); |
1523 | } else if (stop_on_nosnap && inode && | |
1524 | ceph_snap(inode) == CEPH_NOSNAP) { | |
9d5a09e6 | 1525 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1526 | break; |
1527 | } else { | |
1528 | pos -= temp->d_name.len; | |
1b71fe2e AV |
1529 | if (pos < 0) { |
1530 | spin_unlock(&temp->d_lock); | |
2f2dc053 | 1531 | break; |
1b71fe2e | 1532 | } |
2f2dc053 SW |
1533 | strncpy(path + pos, temp->d_name.name, |
1534 | temp->d_name.len); | |
2f2dc053 | 1535 | } |
1b71fe2e | 1536 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1537 | if (pos) |
1538 | path[--pos] = '/'; | |
1539 | temp = temp->d_parent; | |
2f2dc053 | 1540 | } |
1b71fe2e AV |
1541 | rcu_read_unlock(); |
1542 | if (pos != 0 || read_seqretry(&rename_lock, seq)) { | |
104648ad | 1543 | pr_err("build_path did not end path lookup where " |
2f2dc053 SW |
1544 | "expected, namelen is %d, pos is %d\n", len, pos); |
1545 | /* presumably this is only possible if racing with a | |
1546 | rename of one of the parent directories (we cannot |
1547 | lock the dentries above us to prevent this, but | |
1548 | retrying should be harmless) */ | |
1549 | kfree(path); | |
1550 | goto retry; | |
1551 | } | |
1552 | ||
1553 | *base = ceph_ino(temp->d_inode); | |
1554 | *plen = len; | |
104648ad | 1555 | dout("build_path on %p %d built %llx '%.*s'\n", |
b7ab39f6 | 1556 | dentry, dentry->d_count, *base, len, path); |
2f2dc053 SW |
1557 | return path; |
1558 | } | |
1559 | ||
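A hypothetical caller of ceph_mdsc_build_path(), matching the contract above (the result is heap-allocated, so the caller must kfree it); per the header comment's encoding rule, a .snap component in the path appears as a double slash:

```c
u64 base;
int len;
char *path = ceph_mdsc_build_path(dentry, &len, &base, 1);

if (IS_ERR(path))
	return PTR_ERR(path);
dout("built path %llx/%.*s\n", base, len, path);
kfree(path);	/* caller owns the allocation */
```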
1560 | static int build_dentry_path(struct dentry *dentry, | |
1561 | const char **ppath, int *ppathlen, u64 *pino, | |
1562 | int *pfreepath) | |
1563 | { | |
1564 | char *path; | |
1565 | ||
1566 | if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) { | |
1567 | *pino = ceph_ino(dentry->d_parent->d_inode); | |
1568 | *ppath = dentry->d_name.name; | |
1569 | *ppathlen = dentry->d_name.len; | |
1570 | return 0; | |
1571 | } | |
1572 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1573 | if (IS_ERR(path)) | |
1574 | return PTR_ERR(path); | |
1575 | *ppath = path; | |
1576 | *pfreepath = 1; | |
1577 | return 0; | |
1578 | } | |
1579 | ||
1580 | static int build_inode_path(struct inode *inode, | |
1581 | const char **ppath, int *ppathlen, u64 *pino, | |
1582 | int *pfreepath) | |
1583 | { | |
1584 | struct dentry *dentry; | |
1585 | char *path; | |
1586 | ||
1587 | if (ceph_snap(inode) == CEPH_NOSNAP) { | |
1588 | *pino = ceph_ino(inode); | |
1589 | *ppathlen = 0; | |
1590 | return 0; | |
1591 | } | |
1592 | dentry = d_find_alias(inode); | |
1593 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1594 | dput(dentry); | |
1595 | if (IS_ERR(path)) | |
1596 | return PTR_ERR(path); | |
1597 | *ppath = path; | |
1598 | *pfreepath = 1; | |
1599 | return 0; | |
1600 | } | |
1601 | ||
1602 | /* | |
1603 | * request arguments may be specified via an inode *, a dentry *, or | |
1604 | * an explicit ino+path. | |
1605 | */ | |
1606 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |
1607 | const char *rpath, u64 rino, | |
1608 | const char **ppath, int *pathlen, | |
1609 | u64 *ino, int *freepath) | |
1610 | { | |
1611 | int r = 0; | |
1612 | ||
1613 | if (rinode) { | |
1614 | r = build_inode_path(rinode, ppath, pathlen, ino, freepath); | |
1615 | dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), | |
1616 | ceph_snap(rinode)); | |
1617 | } else if (rdentry) { | |
1618 | r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); | |
1619 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, | |
1620 | *ppath); | |
795858db | 1621 | } else if (rpath || rino) { |
2f2dc053 SW |
1622 | *ino = rino; |
1623 | *ppath = rpath; | |
b000056a | 1624 | *pathlen = rpath ? strlen(rpath) : 0; |
2f2dc053 SW |
1625 | dout(" path %.*s\n", *pathlen, rpath); |
1626 | } | |
1627 | ||
1628 | return r; | |
1629 | } | |
1630 | ||
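/*
 * Worked example (sketch) of the three modes above:
 *   rinode set          -> ino = that inode's ino, empty path
 *   rdentry set         -> parent dir ino + dentry name (or a full path
 *                          back to the first non-snapped ancestor)
 *   rpath and/or rino   -> used verbatim
 */
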
1631 | /* | |
1632 | * called under mdsc->mutex | |
1633 | */ | |
1634 | static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |
1635 | struct ceph_mds_request *req, | |
1636 | int mds) | |
1637 | { | |
1638 | struct ceph_msg *msg; | |
1639 | struct ceph_mds_request_head *head; | |
1640 | const char *path1 = NULL; | |
1641 | const char *path2 = NULL; | |
1642 | u64 ino1 = 0, ino2 = 0; | |
1643 | int pathlen1 = 0, pathlen2 = 0; | |
1644 | int freepath1 = 0, freepath2 = 0; | |
1645 | int len; | |
1646 | u16 releases; | |
1647 | void *p, *end; | |
1648 | int ret; | |
1649 | ||
1650 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | |
1651 | req->r_path1, req->r_ino1.ino, | |
1652 | &path1, &pathlen1, &ino1, &freepath1); | |
1653 | if (ret < 0) { | |
1654 | msg = ERR_PTR(ret); | |
1655 | goto out; | |
1656 | } | |
1657 | ||
1658 | ret = set_request_path_attr(NULL, req->r_old_dentry, | |
1659 | req->r_path2, req->r_ino2.ino, | |
1660 | &path2, &pathlen2, &ino2, &freepath2); | |
1661 | if (ret < 0) { | |
1662 | msg = ERR_PTR(ret); | |
1663 | goto out_free1; | |
1664 | } | |
1665 | ||
1666 | len = sizeof(*head) + | |
ac8839d7 | 1667 | pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)); |
2f2dc053 SW |
1668 | |
1669 | /* calculate (max) length for cap releases */ | |
1670 | len += sizeof(struct ceph_mds_request_release) * | |
1671 | (!!req->r_inode_drop + !!req->r_dentry_drop + | |
1672 | !!req->r_old_inode_drop + !!req->r_old_dentry_drop); | |
1673 | if (req->r_dentry_drop) | |
1674 | len += req->r_dentry->d_name.len; | |
1675 | if (req->r_old_dentry_drop) | |
1676 | len += req->r_old_dentry->d_name.len; | |
1677 | ||
b61c2763 | 1678 | msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f2 SW |
1679 | if (!msg) { |
1680 | msg = ERR_PTR(-ENOMEM); | |
2f2dc053 | 1681 | goto out_free2; |
a79832f2 | 1682 | } |
2f2dc053 | 1683 | |
6df058c0 SW |
1684 | msg->hdr.tid = cpu_to_le64(req->r_tid); |
1685 | ||
2f2dc053 SW |
1686 | head = msg->front.iov_base; |
1687 | p = msg->front.iov_base + sizeof(*head); | |
1688 | end = msg->front.iov_base + msg->front.iov_len; | |
1689 | ||
1690 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | |
1691 | head->op = cpu_to_le32(req->r_op); | |
ff3d0046 EB |
1692 | head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); |
1693 | head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); | |
2f2dc053 SW |
1694 | head->args = req->r_args; |
1695 | ||
1696 | ceph_encode_filepath(&p, end, ino1, path1); | |
1697 | ceph_encode_filepath(&p, end, ino2, path2); | |
1698 | ||
e979cf50 SW |
1699 | /* make note of release offset, in case we need to replay */ |
1700 | req->r_request_release_offset = p - msg->front.iov_base; | |
1701 | ||
2f2dc053 SW |
1702 | /* cap releases */ |
1703 | releases = 0; | |
1704 | if (req->r_inode_drop) | |
1705 | releases += ceph_encode_inode_release(&p, | |
1706 | req->r_inode ? req->r_inode : req->r_dentry->d_inode, | |
1707 | mds, req->r_inode_drop, req->r_inode_unless, 0); | |
1708 | if (req->r_dentry_drop) | |
1709 | releases += ceph_encode_dentry_release(&p, req->r_dentry, | |
1710 | mds, req->r_dentry_drop, req->r_dentry_unless); | |
1711 | if (req->r_old_dentry_drop) | |
1712 | releases += ceph_encode_dentry_release(&p, req->r_old_dentry, | |
1713 | mds, req->r_old_dentry_drop, req->r_old_dentry_unless); | |
1714 | if (req->r_old_inode_drop) | |
1715 | releases += ceph_encode_inode_release(&p, | |
1716 | req->r_old_dentry->d_inode, | |
1717 | mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); | |
1718 | head->num_releases = cpu_to_le16(releases); | |
1719 | ||
1720 | BUG_ON(p > end); | |
1721 | msg->front.iov_len = p - msg->front.iov_base; | |
1722 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1723 | ||
ebf18f47 AE |
1724 | if (req->r_data_len) { |
1725 | /* outbound data set only by ceph_sync_setxattr() */ | |
1726 | BUG_ON(!req->r_pages); | |
90af3602 | 1727 | ceph_msg_data_add_pages(msg, req->r_pages, req->r_data_len, 0); |
ebf18f47 | 1728 | } |
02afca6c | 1729 | |
2f2dc053 SW |
1730 | msg->hdr.data_len = cpu_to_le32(req->r_data_len); |
1731 | msg->hdr.data_off = cpu_to_le16(0); | |
1732 | ||
1733 | out_free2: | |
1734 | if (freepath2) | |
1735 | kfree((char *)path2); | |
1736 | out_free1: | |
1737 | if (freepath1) | |
1738 | kfree((char *)path1); | |
1739 | out: | |
1740 | return msg; | |
1741 | } | |
1742 | ||
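/*
 * Resulting layout of the message front, as assembled above (sketch):
 *
 *   struct ceph_mds_request_head
 *   filepath 1  (ino1 + path1 string)
 *   filepath 2  (ino2 + path2 string)
 *   cap/dentry releases   <- r_request_release_offset points here, so
 *                            a replay can simply truncate them off
 */
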
1743 | /* | |
1744 | * called under mdsc->mutex if error, under no mutex if | |
1745 | * success. | |
1746 | */ | |
1747 | static void complete_request(struct ceph_mds_client *mdsc, | |
1748 | struct ceph_mds_request *req) | |
1749 | { | |
1750 | if (req->r_callback) | |
1751 | req->r_callback(mdsc, req); | |
1752 | else | |
03066f23 | 1753 | complete_all(&req->r_completion); |
2f2dc053 SW |
1754 | } |
1755 | ||
1756 | /* | |
1757 | * called under mdsc->mutex | |
1758 | */ | |
1759 | static int __prepare_send_request(struct ceph_mds_client *mdsc, | |
1760 | struct ceph_mds_request *req, | |
1761 | int mds) | |
1762 | { | |
1763 | struct ceph_mds_request_head *rhead; | |
1764 | struct ceph_msg *msg; | |
1765 | int flags = 0; | |
1766 | ||
2f2dc053 | 1767 | req->r_attempts++; |
e55b71f8 GF |
1768 | if (req->r_inode) { |
1769 | struct ceph_cap *cap = | |
1770 | ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); | |
1771 | ||
1772 | if (cap) | |
1773 | req->r_sent_on_mseq = cap->mseq; | |
1774 | else | |
1775 | req->r_sent_on_mseq = -1; | |
1776 | } | |
2f2dc053 SW |
1777 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
1778 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | |
1779 | ||
01a92f17 SW |
1780 | if (req->r_got_unsafe) { |
1781 | /* | |
1782 | * Replay. Do not regenerate message (and rebuild | |
1783 | * paths, etc.); just use the original message. | |
1784 | * Rebuilding paths will break for renames because | |
1785 | * d_move mangles the src name. | |
1786 | */ | |
1787 | msg = req->r_request; | |
1788 | rhead = msg->front.iov_base; | |
1789 | ||
1790 | flags = le32_to_cpu(rhead->flags); | |
1791 | flags |= CEPH_MDS_FLAG_REPLAY; | |
1792 | rhead->flags = cpu_to_le32(flags); | |
1793 | ||
1794 | if (req->r_target_inode) | |
1795 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | |
1796 | ||
1797 | rhead->num_retry = req->r_attempts - 1; | |
e979cf50 SW |
1798 | |
1799 | /* remove cap/dentry releases from message */ | |
1800 | rhead->num_releases = 0; | |
1801 | msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset); | |
1802 | msg->front.iov_len = req->r_request_release_offset; | |
01a92f17 SW |
1803 | return 0; |
1804 | } | |
1805 | ||
2f2dc053 SW |
1806 | if (req->r_request) { |
1807 | ceph_msg_put(req->r_request); | |
1808 | req->r_request = NULL; | |
1809 | } | |
1810 | msg = create_request_message(mdsc, req, mds); | |
1811 | if (IS_ERR(msg)) { | |
e1518c7c | 1812 | req->r_err = PTR_ERR(msg); |
2f2dc053 | 1813 | complete_request(mdsc, req); |
a79832f2 | 1814 | return PTR_ERR(msg); |
2f2dc053 SW |
1815 | } |
1816 | req->r_request = msg; | |
1817 | ||
1818 | rhead = msg->front.iov_base; | |
2f2dc053 SW |
1819 | rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
1820 | if (req->r_got_unsafe) | |
1821 | flags |= CEPH_MDS_FLAG_REPLAY; | |
1822 | if (req->r_locked_dir) | |
1823 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; | |
1824 | rhead->flags = cpu_to_le32(flags); | |
1825 | rhead->num_fwd = req->r_num_fwd; | |
1826 | rhead->num_retry = req->r_attempts - 1; | |
01a92f17 | 1827 | rhead->ino = 0; |
2f2dc053 SW |
1828 | |
1829 | dout(" r_locked_dir = %p\n", req->r_locked_dir); | |
2f2dc053 SW |
1830 | return 0; |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * send request, or put it on the appropriate wait list. | |
1835 | */ | |
1836 | static int __do_request(struct ceph_mds_client *mdsc, | |
1837 | struct ceph_mds_request *req) | |
1838 | { | |
1839 | struct ceph_mds_session *session = NULL; | |
1840 | int mds = -1; | |
1841 | int err = -EAGAIN; | |
1842 | ||
e1518c7c | 1843 | if (req->r_err || req->r_got_result) |
2f2dc053 SW |
1844 | goto out; |
1845 | ||
1846 | if (req->r_timeout && | |
1847 | time_after_eq(jiffies, req->r_started + req->r_timeout)) { | |
1848 | dout("do_request timed out\n"); | |
1849 | err = -EIO; | |
1850 | goto finish; | |
1851 | } | |
1852 | ||
dc69e2e9 SW |
1853 | put_request_session(req); |
1854 | ||
2f2dc053 SW |
1855 | mds = __choose_mds(mdsc, req); |
1856 | if (mds < 0 || | |
1857 | ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { | |
1858 | dout("do_request no mds or not active, waiting for map\n"); | |
1859 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
1860 | goto out; | |
1861 | } | |
1862 | ||
1863 | /* get, open session */ | |
1864 | session = __ceph_lookup_mds_session(mdsc, mds); | |
9c423956 | 1865 | if (!session) { |
2f2dc053 | 1866 | session = register_session(mdsc, mds); |
9c423956 SW |
1867 | if (IS_ERR(session)) { |
1868 | err = PTR_ERR(session); | |
1869 | goto finish; | |
1870 | } | |
1871 | } | |
dc69e2e9 SW |
1872 | req->r_session = get_session(session); |
1873 | ||
2f2dc053 SW |
1874 | dout("do_request mds%d session %p state %s\n", mds, session, |
1875 | session_state_name(session->s_state)); | |
1876 | if (session->s_state != CEPH_MDS_SESSION_OPEN && | |
1877 | session->s_state != CEPH_MDS_SESSION_HUNG) { | |
1878 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
1879 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
1880 | __open_session(mdsc, session); | |
1881 | list_add(&req->r_wait, &session->s_waiting); | |
1882 | goto out_session; | |
1883 | } | |
1884 | ||
1885 | /* send request */ | |
2f2dc053 SW |
1886 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
1887 | ||
1888 | if (req->r_request_started == 0) /* note request start time */ | |
1889 | req->r_request_started = jiffies; | |
1890 | ||
1891 | err = __prepare_send_request(mdsc, req, mds); | |
1892 | if (!err) { | |
1893 | ceph_msg_get(req->r_request); | |
1894 | ceph_con_send(&session->s_con, req->r_request); | |
1895 | } | |
1896 | ||
1897 | out_session: | |
1898 | ceph_put_mds_session(session); | |
1899 | out: | |
1900 | return err; | |
1901 | ||
1902 | finish: | |
e1518c7c | 1903 | req->r_err = err; |
2f2dc053 SW |
1904 | complete_request(mdsc, req); |
1905 | goto out; | |
1906 | } | |
1907 | ||
1908 | /* | |
1909 | * called under mdsc->mutex | |
1910 | */ | |
1911 | static void __wake_requests(struct ceph_mds_client *mdsc, | |
1912 | struct list_head *head) | |
1913 | { | |
ed75ec2c YZ |
1914 | struct ceph_mds_request *req; |
1915 | LIST_HEAD(tmp_list); | |
1916 | ||
1917 | list_splice_init(head, &tmp_list); | |
2f2dc053 | 1918 | |
ed75ec2c YZ |
1919 | while (!list_empty(&tmp_list)) { |
1920 | req = list_entry(tmp_list.next, | |
1921 | struct ceph_mds_request, r_wait); | |
2f2dc053 | 1922 | list_del_init(&req->r_wait); |
7971bd92 | 1923 | dout(" wake request %p tid %llu\n", req, req->r_tid); |
2f2dc053 SW |
1924 | __do_request(mdsc, req); |
1925 | } | |
1926 | } | |
1927 | ||
1928 | /* | |
1929 | * Wake up threads with requests pending for @mds, so that they can | |
29790f26 | 1930 | * resubmit their requests to a possibly different mds. |
2f2dc053 | 1931 | */ |
29790f26 | 1932 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc053 | 1933 | { |
44ca18f2 SW |
1934 | struct ceph_mds_request *req; |
1935 | struct rb_node *p; | |
2f2dc053 SW |
1936 | |
1937 | dout("kick_requests mds%d\n", mds); | |
44ca18f2 SW |
1938 | for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) { |
1939 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
1940 | if (req->r_got_unsafe) | |
1941 | continue; | |
1942 | if (req->r_session && | |
1943 | req->r_session->s_mds == mds) { | |
1944 | dout(" kicking tid %llu\n", req->r_tid); | |
44ca18f2 | 1945 | __do_request(mdsc, req); |
2f2dc053 SW |
1946 | } |
1947 | } | |
1948 | } | |
1949 | ||
1950 | void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, | |
1951 | struct ceph_mds_request *req) | |
1952 | { | |
1953 | dout("submit_request on %p\n", req); | |
1954 | mutex_lock(&mdsc->mutex); | |
1955 | __register_request(mdsc, req, NULL); | |
1956 | __do_request(mdsc, req); | |
1957 | mutex_unlock(&mdsc->mutex); | |
1958 | } | |
1959 | ||
1960 | /* | |
1961 | * Synchronously perform an mds request. Take care of all of the | |
1962 | * session setup, forwarding, and retry details. | |
1963 | */ | |
1964 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, | |
1965 | struct inode *dir, | |
1966 | struct ceph_mds_request *req) | |
1967 | { | |
1968 | int err; | |
1969 | ||
1970 | dout("do_request on %p\n", req); | |
1971 | ||
1972 | /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ | |
1973 | if (req->r_inode) | |
1974 | ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); | |
1975 | if (req->r_locked_dir) | |
1976 | ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); | |
1977 | if (req->r_old_dentry) | |
41b02e1f SW |
1978 | ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), |
1979 | CEPH_CAP_PIN); | |
2f2dc053 SW |
1980 | |
1981 | /* issue */ | |
1982 | mutex_lock(&mdsc->mutex); | |
1983 | __register_request(mdsc, req, dir); | |
1984 | __do_request(mdsc, req); | |
1985 | ||
e1518c7c SW |
1986 | if (req->r_err) { |
1987 | err = req->r_err; | |
1988 | __unregister_request(mdsc, req); | |
1989 | dout("do_request early error %d\n", err); | |
1990 | goto out; | |
2f2dc053 SW |
1991 | } |
1992 | ||
e1518c7c SW |
1993 | /* wait */ |
1994 | mutex_unlock(&mdsc->mutex); | |
1995 | dout("do_request waiting\n"); | |
1996 | if (req->r_timeout) { | |
aa91647c | 1997 | err = (long)wait_for_completion_killable_timeout( |
e1518c7c SW |
1998 | &req->r_completion, req->r_timeout); |
1999 | if (err == 0) | |
2000 | err = -EIO; | |
2001 | } else { | |
aa91647c | 2002 | err = wait_for_completion_killable(&req->r_completion); |
e1518c7c SW |
2003 | } |
2004 | dout("do_request waited, got %d\n", err); | |
2005 | mutex_lock(&mdsc->mutex); | |
5b1daecd | 2006 | |
e1518c7c SW |
2007 | /* only abort if we didn't race with a real reply */ |
2008 | if (req->r_got_result) { | |
2009 | err = le32_to_cpu(req->r_reply_info.head->result); | |
2010 | } else if (err < 0) { | |
2011 | dout("aborted request %lld with %d\n", req->r_tid, err); | |
b4556396 SW |
2012 | |
2013 | /* | |
2014 | * ensure we aren't running concurrently with | |
2015 | * ceph_fill_trace or ceph_readdir_prepopulate, which | |
2016 | * rely on locks (dir mutex) held by our caller. | |
2017 | */ | |
2018 | mutex_lock(&req->r_fill_mutex); | |
e1518c7c SW |
2019 | req->r_err = err; |
2020 | req->r_aborted = true; | |
b4556396 | 2021 | mutex_unlock(&req->r_fill_mutex); |
5b1daecd | 2022 | |
e1518c7c | 2023 | if (req->r_locked_dir && |
167c9e35 SW |
2024 | (req->r_op & CEPH_MDS_OP_WRITE)) |
2025 | ceph_invalidate_dir_request(req); | |
2f2dc053 | 2026 | } else { |
e1518c7c | 2027 | err = req->r_err; |
2f2dc053 | 2028 | } |
2f2dc053 | 2029 | |
e1518c7c SW |
2030 | out: |
2031 | mutex_unlock(&mdsc->mutex); | |
2f2dc053 SW |
2032 | dout("do_request %p done, result %d\n", req, err); |
2033 | return err; | |
2034 | } | |
2035 | ||
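/*
 * Usage sketch (illustrative; this helper is hypothetical): a typical
 * synchronous caller, loosely modeled on the getattr path.  Real
 * callers also fill in r_num_caps and the op-specific r_args fields.
 */
static int __maybe_unused example_sync_getattr(struct ceph_mds_client *mdsc,
					       struct inode *inode)
{
	struct ceph_mds_request *req;
	int err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);	/* the ref is dropped when the request is freed */
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	return err;
}
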
167c9e35 | 2036 | /* |
2f276c51 | 2037 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
167c9e35 SW |
2038 | * namespace request. |
2039 | */ | |
2040 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) | |
2041 | { | |
2042 | struct inode *inode = req->r_locked_dir; | |
167c9e35 | 2043 | |
2f276c51 | 2044 | dout("invalidate_dir_request %p (complete, lease(s))\n", inode); |
167c9e35 | 2045 | |
2f276c51 | 2046 | ceph_dir_clear_complete(inode); |
167c9e35 SW |
2047 | if (req->r_dentry) |
2048 | ceph_invalidate_dentry_lease(req->r_dentry); | |
2049 | if (req->r_old_dentry) | |
2050 | ceph_invalidate_dentry_lease(req->r_old_dentry); | |
2051 | } | |
2052 | ||
2f2dc053 SW |
2053 | /* |
2054 | * Handle mds reply. | |
2055 | * | |
2056 | * We take the session mutex and parse and process the reply immediately. | |
2057 | * This preserves the logical ordering of replies, capabilities, etc., sent | |
2058 | * by the MDS as they are applied to our local cache. | |
2059 | */ | |
2060 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |
2061 | { | |
2062 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2063 | struct ceph_mds_request *req; | |
2064 | struct ceph_mds_reply_head *head = msg->front.iov_base; | |
2065 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ | |
2066 | u64 tid; | |
2067 | int err, result; | |
2600d2dd | 2068 | int mds = session->s_mds; |
2f2dc053 | 2069 | |
2f2dc053 SW |
2070 | if (msg->front.iov_len < sizeof(*head)) { |
2071 | pr_err("mdsc_handle_reply got corrupt (short) reply\n"); | |
9ec7cab1 | 2072 | ceph_msg_dump(msg); |
2f2dc053 SW |
2073 | return; |
2074 | } | |
2075 | ||
2076 | /* get request, session */ | |
6df058c0 | 2077 | tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2078 | mutex_lock(&mdsc->mutex); |
2079 | req = __lookup_request(mdsc, tid); | |
2080 | if (!req) { | |
2081 | dout("handle_reply on unknown tid %llu\n", tid); | |
2082 | mutex_unlock(&mdsc->mutex); | |
2083 | return; | |
2084 | } | |
2085 | dout("handle_reply %p\n", req); | |
2f2dc053 SW |
2086 | |
2087 | /* correct session? */ | |
d96d6049 | 2088 | if (req->r_session != session) { |
2f2dc053 SW |
2089 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
2090 | " not mds%d\n", tid, session->s_mds, | |
2091 | req->r_session ? req->r_session->s_mds : -1); | |
2092 | mutex_unlock(&mdsc->mutex); | |
2093 | goto out; | |
2094 | } | |
2095 | ||
2096 | /* dup? */ | |
2097 | if ((req->r_got_unsafe && !head->safe) || | |
2098 | (req->r_got_safe && head->safe)) { | |
2099 | pr_warning("got a dup %s reply on %llu from mds%d\n", | |
2100 | head->safe ? "safe" : "unsafe", tid, mds); | |
2101 | mutex_unlock(&mdsc->mutex); | |
2102 | goto out; | |
2103 | } | |
85792d0d SW |
2104 | if (req->r_got_safe && !head->safe) { |
2105 | pr_warning("got unsafe after safe on %llu from mds%d\n", | |
2106 | tid, mds); | |
2107 | mutex_unlock(&mdsc->mutex); | |
2108 | goto out; | |
2109 | } | |
2f2dc053 SW |
2110 | |
2111 | result = le32_to_cpu(head->result); | |
2112 | ||
2113 | /* | |
e55b71f8 GF |
2114 | * Handle an ESTALE: | |
2115 | * - if we're not talking to the authority, resend to it; | |
2116 | * - if the authority has changed while we weren't looking, | |
2117 | *   resend to the new authority; | |
2118 | * - otherwise we really do have to return ESTALE. | |
2f2dc053 SW |
2119 | */ |
2120 | if (result == -ESTALE) { | |
e55b71f8 | 2121 | dout("got ESTALE on request %llu", req->r_tid); |
213c99ee SW |
2122 | if (!req->r_inode) { |
2123 | /* do nothing; not an authority problem */ | |
2124 | } else if (req->r_direct_mode != USE_AUTH_MDS) { | |
e55b71f8 GF |
2125 | dout("not using auth, setting for that now"); |
2126 | req->r_direct_mode = USE_AUTH_MDS; | |
2f2dc053 SW |
2127 | __do_request(mdsc, req); |
2128 | mutex_unlock(&mdsc->mutex); | |
2129 | goto out; | |
e55b71f8 GF |
2130 | } else { |
2131 | struct ceph_inode_info *ci = ceph_inode(req->r_inode); | |
4af25fdd SW |
2132 | struct ceph_cap *cap = NULL; |
2133 | ||
2134 | if (req->r_session) | |
2135 | cap = ceph_get_cap_for_mds(ci, | |
2136 | req->r_session->s_mds); | |
e55b71f8 GF |
2137 | |
2138 | dout("already using auth"); | |
2139 | if ((!cap || cap != ci->i_auth_cap) || | |
2140 | (cap->mseq != req->r_sent_on_mseq)) { | |
2141 | dout("but cap changed, so resending"); | |
2142 | __do_request(mdsc, req); | |
2143 | mutex_unlock(&mdsc->mutex); | |
2144 | goto out; | |
2145 | } | |
2f2dc053 | 2146 | } |
e55b71f8 | 2147 | dout("have to return ESTALE on request %llu", req->r_tid); |
2f2dc053 SW |
2148 | } |
2149 | ||
e55b71f8 | 2150 | |
2f2dc053 SW |
2151 | if (head->safe) { |
2152 | req->r_got_safe = true; | |
2153 | __unregister_request(mdsc, req); | |
03066f23 | 2154 | complete_all(&req->r_safe_completion); |
2f2dc053 SW |
2155 | |
2156 | if (req->r_got_unsafe) { | |
2157 | /* | |
2158 | * We already handled the unsafe response, now do the | |
2159 | * cleanup. No need to examine the response; the MDS | |
2160 | * doesn't include any result info in the safe | |
2161 | * response. And even if it did, there is nothing | |
2162 | * useful we could do with a revised return value. | |
2163 | */ | |
2164 | dout("got safe reply %llu, mds%d\n", tid, mds); | |
2165 | list_del_init(&req->r_unsafe_item); | |
2166 | ||
2167 | /* last unsafe request during umount? */ | |
44ca18f2 | 2168 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f23 | 2169 | complete_all(&mdsc->safe_umount_waiters); |
2f2dc053 SW |
2170 | mutex_unlock(&mdsc->mutex); |
2171 | goto out; | |
2172 | } | |
e1518c7c | 2173 | } else { |
2f2dc053 SW |
2174 | req->r_got_unsafe = true; |
2175 | list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); | |
2176 | } | |
2177 | ||
2178 | dout("handle_reply tid %lld result %d\n", tid, result); | |
2179 | rinfo = &req->r_reply_info; | |
14303d20 | 2180 | err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc053 SW |
2181 | mutex_unlock(&mdsc->mutex); |
2182 | ||
2183 | mutex_lock(&session->s_mutex); | |
2184 | if (err < 0) { | |
25933abd | 2185 | pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); |
9ec7cab1 | 2186 | ceph_msg_dump(msg); |
2f2dc053 SW |
2187 | goto out_err; |
2188 | } | |
2189 | ||
2190 | /* snap trace */ | |
2191 | if (rinfo->snapblob_len) { | |
2192 | down_write(&mdsc->snap_rwsem); | |
2193 | ceph_update_snap_trace(mdsc, rinfo->snapblob, | |
2194 | rinfo->snapblob + rinfo->snapblob_len, | |
2195 | le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP); | |
2196 | downgrade_write(&mdsc->snap_rwsem); | |
2197 | } else { | |
2198 | down_read(&mdsc->snap_rwsem); | |
2199 | } | |
2200 | ||
2201 | /* insert trace into our cache */ | |
b4556396 | 2202 | mutex_lock(&req->r_fill_mutex); |
3d14c5d2 | 2203 | err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); |
2f2dc053 | 2204 | if (err == 0) { |
6e8575fa SL |
2205 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
2206 | req->r_op == CEPH_MDS_OP_LSSNAP) && | |
25933abd | 2207 | rinfo->dir_nr) |
2f2dc053 | 2208 | ceph_readdir_prepopulate(req, req->r_session); |
37151668 | 2209 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc053 | 2210 | } |
b4556396 | 2211 | mutex_unlock(&req->r_fill_mutex); |
2f2dc053 SW |
2212 | |
2213 | up_read(&mdsc->snap_rwsem); | |
2214 | out_err: | |
e1518c7c SW |
2215 | mutex_lock(&mdsc->mutex); |
2216 | if (!req->r_aborted) { | |
2217 | if (err) { | |
2218 | req->r_err = err; | |
2219 | } else { | |
2220 | req->r_reply = msg; | |
2221 | ceph_msg_get(msg); | |
2222 | req->r_got_result = true; | |
2223 | } | |
2f2dc053 | 2224 | } else { |
e1518c7c | 2225 | dout("reply arrived after request %lld was aborted\n", tid); |
2f2dc053 | 2226 | } |
e1518c7c | 2227 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 2228 | |
ee6b272b | 2229 | ceph_add_cap_releases(mdsc, req->r_session); |
2f2dc053 SW |
2230 | mutex_unlock(&session->s_mutex); |
2231 | ||
2232 | /* kick calling process */ | |
2233 | complete_request(mdsc, req); | |
2234 | out: | |
2235 | ceph_mdsc_put_request(req); | |
2236 | return; | |
2237 | } | |
2238 | ||
2239 | ||
2240 | ||
2241 | /* | |
2242 | * handle mds notification that our request has been forwarded. | |
2243 | */ | |
2600d2dd SW |
2244 | static void handle_forward(struct ceph_mds_client *mdsc, |
2245 | struct ceph_mds_session *session, | |
2246 | struct ceph_msg *msg) | |
2f2dc053 SW |
2247 | { |
2248 | struct ceph_mds_request *req; | |
a1ea787c | 2249 | u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2250 | u32 next_mds; |
2251 | u32 fwd_seq; | |
2f2dc053 SW |
2252 | int err = -EINVAL; |
2253 | void *p = msg->front.iov_base; | |
2254 | void *end = p + msg->front.iov_len; | |
2f2dc053 | 2255 | |
a1ea787c | 2256 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea SW |
2257 | next_mds = ceph_decode_32(&p); |
2258 | fwd_seq = ceph_decode_32(&p); | |
2f2dc053 SW |
2259 | |
2260 | mutex_lock(&mdsc->mutex); | |
2261 | req = __lookup_request(mdsc, tid); | |
2262 | if (!req) { | |
2a8e5e36 | 2263 | dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); |
2f2dc053 SW |
2264 | goto out; /* dup reply? */ |
2265 | } | |
2266 | ||
2a8e5e36 SW |
2267 | if (req->r_aborted) { |
2268 | dout("forward tid %llu aborted, unregistering\n", tid); | |
2269 | __unregister_request(mdsc, req); | |
2270 | } else if (fwd_seq <= req->r_num_fwd) { | |
2271 | dout("forward tid %llu to mds%d - old seq %d <= %d\n", | |
2f2dc053 SW |
2272 | tid, next_mds, req->r_num_fwd, fwd_seq); |
2273 | } else { | |
2274 | /* resend; a forward race is not possible, the mds would drop it */ | |
2a8e5e36 SW |
2275 | dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); |
2276 | BUG_ON(req->r_err); | |
2277 | BUG_ON(req->r_got_result); | |
2f2dc053 SW |
2278 | req->r_num_fwd = fwd_seq; |
2279 | req->r_resend_mds = next_mds; | |
2280 | put_request_session(req); | |
2281 | __do_request(mdsc, req); | |
2282 | } | |
2283 | ceph_mdsc_put_request(req); | |
2284 | out: | |
2285 | mutex_unlock(&mdsc->mutex); | |
2286 | return; | |
2287 | ||
2288 | bad: | |
2289 | pr_err("mdsc_handle_forward decode error err=%d\n", err); | |
2290 | } | |
2291 | ||
2292 | /* | |
2293 | * handle a mds session control message | |
2294 | */ | |
2295 | static void handle_session(struct ceph_mds_session *session, | |
2296 | struct ceph_msg *msg) | |
2297 | { | |
2298 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2299 | u32 op; | |
2300 | u64 seq; | |
2600d2dd | 2301 | int mds = session->s_mds; |
2f2dc053 SW |
2302 | struct ceph_mds_session_head *h = msg->front.iov_base; |
2303 | int wake = 0; | |
2304 | ||
2f2dc053 SW |
2305 | /* decode */ |
2306 | if (msg->front.iov_len != sizeof(*h)) | |
2307 | goto bad; | |
2308 | op = le32_to_cpu(h->op); | |
2309 | seq = le64_to_cpu(h->seq); | |
2310 | ||
2311 | mutex_lock(&mdsc->mutex); | |
2600d2dd SW |
2312 | if (op == CEPH_SESSION_CLOSE) |
2313 | __unregister_session(mdsc, session); | |
2f2dc053 SW |
2314 | /* FIXME: this ttl calculation is generous */ |
2315 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; | |
2316 | mutex_unlock(&mdsc->mutex); | |
2317 | ||
2318 | mutex_lock(&session->s_mutex); | |
2319 | ||
2320 | dout("handle_session mds%d %s %p state %s seq %llu\n", | |
2321 | mds, ceph_session_op_name(op), session, | |
2322 | session_state_name(session->s_state), seq); | |
2323 | ||
2324 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { | |
2325 | session->s_state = CEPH_MDS_SESSION_OPEN; | |
2326 | pr_info("mds%d came back\n", session->s_mds); | |
2327 | } | |
2328 | ||
2329 | switch (op) { | |
2330 | case CEPH_SESSION_OPEN: | |
29790f26 SW |
2331 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2332 | pr_info("mds%d reconnect success\n", session->s_mds); | |
2f2dc053 SW |
2333 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2334 | renewed_caps(mdsc, session, 0); | |
2335 | wake = 1; | |
2336 | if (mdsc->stopping) | |
2337 | __close_session(mdsc, session); | |
2338 | break; | |
2339 | ||
2340 | case CEPH_SESSION_RENEWCAPS: | |
2341 | if (session->s_renew_seq == seq) | |
2342 | renewed_caps(mdsc, session, 1); | |
2343 | break; | |
2344 | ||
2345 | case CEPH_SESSION_CLOSE: | |
29790f26 SW |
2346 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2347 | pr_info("mds%d reconnect denied\n", session->s_mds); | |
2f2dc053 SW |
2348 | remove_session_caps(session); |
2349 | wake = 1; /* for good measure */ | |
f3c60c59 | 2350 | wake_up_all(&mdsc->session_close_wq); |
29790f26 | 2351 | kick_requests(mdsc, mds); |
2f2dc053 SW |
2352 | break; |
2353 | ||
2354 | case CEPH_SESSION_STALE: | |
2355 | pr_info("mds%d caps went stale, renewing\n", | |
2356 | session->s_mds); | |
d8fb02ab | 2357 | spin_lock(&session->s_gen_ttl_lock); |
2f2dc053 | 2358 | session->s_cap_gen++; |
1ce208a6 | 2359 | session->s_cap_ttl = jiffies - 1; |
d8fb02ab | 2360 | spin_unlock(&session->s_gen_ttl_lock); |
2f2dc053 SW |
2361 | send_renew_caps(mdsc, session); |
2362 | break; | |
2363 | ||
2364 | case CEPH_SESSION_RECALL_STATE: | |
2365 | trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); | |
2366 | break; | |
2367 | ||
2368 | default: | |
2369 | pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); | |
2370 | WARN_ON(1); | |
2371 | } | |
2372 | ||
2373 | mutex_unlock(&session->s_mutex); | |
2374 | if (wake) { | |
2375 | mutex_lock(&mdsc->mutex); | |
2376 | __wake_requests(mdsc, &session->s_waiting); | |
2377 | mutex_unlock(&mdsc->mutex); | |
2378 | } | |
2379 | return; | |
2380 | ||
2381 | bad: | |
2382 | pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, | |
2383 | (int)msg->front.iov_len); | |
9ec7cab1 | 2384 | ceph_msg_dump(msg); |
2f2dc053 SW |
2385 | return; |
2386 | } | |
2387 | ||
2388 | ||
2389 | /* | |
2390 | * called under session->mutex. | |
2391 | */ | |
2392 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, | |
2393 | struct ceph_mds_session *session) | |
2394 | { | |
2395 | struct ceph_mds_request *req, *nreq; | |
2396 | int err; | |
2397 | ||
2398 | dout("replay_unsafe_requests mds%d\n", session->s_mds); | |
2399 | ||
2400 | mutex_lock(&mdsc->mutex); | |
2401 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { | |
2402 | err = __prepare_send_request(mdsc, req, session->s_mds); | |
2403 | if (!err) { | |
2404 | ceph_msg_get(req->r_request); | |
2405 | ceph_con_send(&session->s_con, req->r_request); | |
2406 | } | |
2407 | } | |
2408 | mutex_unlock(&mdsc->mutex); | |
2409 | } | |
2410 | ||
2411 | /* | |
2412 | * Encode information about a cap for a reconnect with the MDS. | |
2413 | */ | |
2f2dc053 SW |
2414 | static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, |
2415 | void *arg) | |
2416 | { | |
20cb34ae SW |
2417 | union { |
2418 | struct ceph_mds_cap_reconnect v2; | |
2419 | struct ceph_mds_cap_reconnect_v1 v1; | |
2420 | } rec; | |
2421 | size_t reclen; | |
2f2dc053 | 2422 | struct ceph_inode_info *ci; |
20cb34ae SW |
2423 | struct ceph_reconnect_state *recon_state = arg; |
2424 | struct ceph_pagelist *pagelist = recon_state->pagelist; | |
2f2dc053 SW |
2425 | char *path; |
2426 | int pathlen, err; | |
2427 | u64 pathbase; | |
2428 | struct dentry *dentry; | |
2429 | ||
2430 | ci = cap->ci; | |
2431 | ||
2432 | dout(" adding %p ino %llx.%llx cap %p %lld %s\n", | |
2433 | inode, ceph_vinop(inode), cap, cap->cap_id, | |
2434 | ceph_cap_string(cap->issued)); | |
93cea5be SW |
2435 | err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); |
2436 | if (err) | |
2437 | return err; | |
2f2dc053 SW |
2438 | |
2439 | dentry = d_find_alias(inode); | |
2440 | if (dentry) { | |
2441 | path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); | |
2442 | if (IS_ERR(path)) { | |
2443 | err = PTR_ERR(path); | |
e072f8aa | 2444 | goto out_dput; |
2f2dc053 SW |
2445 | } |
2446 | } else { | |
2447 | path = NULL; | |
2448 | pathlen = 0; | |
2449 | } | |
93cea5be SW |
2450 | err = ceph_pagelist_encode_string(pagelist, path, pathlen); |
2451 | if (err) | |
e072f8aa | 2452 | goto out_free; |
2f2dc053 | 2453 | |
be655596 | 2454 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
2455 | cap->seq = 0; /* reset cap seq */ |
2456 | cap->issue_seq = 0; /* and issue_seq */ | |
20cb34ae SW |
2457 | |
2458 | if (recon_state->flock) { | |
2459 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); | |
2460 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2461 | rec.v2.issued = cpu_to_le32(cap->issued); | |
2462 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2463 | rec.v2.pathbase = cpu_to_le64(pathbase); | |
2464 | rec.v2.flock_len = 0; | |
2465 | reclen = sizeof(rec.v2); | |
2466 | } else { | |
2467 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); | |
2468 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2469 | rec.v1.issued = cpu_to_le32(cap->issued); | |
2470 | rec.v1.size = cpu_to_le64(inode->i_size); | |
2471 | ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); | |
2472 | ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); | |
2473 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2474 | rec.v1.pathbase = cpu_to_le64(pathbase); | |
2475 | reclen = sizeof(rec.v1); | |
2476 | } | |
be655596 | 2477 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 | 2478 | |
40819f6f GF |
2479 | if (recon_state->flock) { |
2480 | int num_fcntl_locks, num_flock_locks; | |
fca4451a GF |
2481 | struct ceph_pagelist_cursor trunc_point; |
2482 | ||
2483 | ceph_pagelist_set_cursor(pagelist, &trunc_point); | |
2484 | do { | |
496e5955 | 2485 | lock_flocks(); |
fca4451a GF |
2486 | ceph_count_locks(inode, &num_fcntl_locks, |
2487 | &num_flock_locks); | |
c420276a | 2488 | rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) + |
fca4451a GF |
2489 | (num_fcntl_locks+num_flock_locks) * |
2490 | sizeof(struct ceph_filelock)); | |
496e5955 | 2491 | unlock_flocks(); |
fca4451a GF |
2492 | |
2493 | /* pre-alloc pagelist */ | |
2494 | ceph_pagelist_truncate(pagelist, &trunc_point); | |
2495 | err = ceph_pagelist_append(pagelist, &rec, reclen); | |
2496 | if (!err) | |
2497 | err = ceph_pagelist_reserve(pagelist, | |
2498 | rec.v2.flock_len); | |
2499 | ||
2500 | /* encode locks */ | |
2501 | if (!err) { | |
496e5955 | 2502 | lock_flocks(); |
fca4451a GF |
2503 | err = ceph_encode_locks(inode, |
2504 | pagelist, | |
2505 | num_fcntl_locks, | |
2506 | num_flock_locks); | |
496e5955 | 2507 | unlock_flocks(); |
fca4451a GF |
2508 | } |
2509 | } while (err == -ENOSPC); | |
3612abbd SW |
2510 | } else { |
2511 | err = ceph_pagelist_append(pagelist, &rec, reclen); | |
40819f6f | 2512 | } |
93cea5be | 2513 | |
e072f8aa | 2514 | out_free: |
2f2dc053 | 2515 | kfree(path); |
e072f8aa | 2516 | out_dput: |
2f2dc053 | 2517 | dput(dentry); |
93cea5be | 2518 | return err; |
2f2dc053 SW |
2519 | } |
2520 | ||
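/*
 * Per-cap reconnect record, as appended above (sketch):
 *
 *   le64 ino
 *   string path                       (le32 length + bytes)
 *   struct ceph_mds_cap_reconnect     (v1 or v2, by peer feature bits)
 *   [v2 only] flock_len bytes of fcntl/flock records
 */
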
2521 | ||
2522 | /* | |
2523 | * If an MDS fails and recovers, clients need to reconnect in order to | |
2524 | * reestablish shared state. This includes all caps issued through | |
2525 | * this session _and_ the snap_realm hierarchy. Because it's not | |
2526 | * clear which snap realms the mds cares about, we send everything we | |
2527 | * know about; that ensures we'll then get any new info the | |
2528 | * recovering MDS might have. | |
2529 | * | |
2530 | * This is a relatively heavyweight operation, but it's rare. | |
2531 | * | |
2532 | * called with mdsc->mutex held. | |
2533 | */ | |
34b6c855 SW |
2534 | static void send_mds_reconnect(struct ceph_mds_client *mdsc, |
2535 | struct ceph_mds_session *session) | |
2f2dc053 | 2536 | { |
2f2dc053 | 2537 | struct ceph_msg *reply; |
a105f00c | 2538 | struct rb_node *p; |
34b6c855 | 2539 | int mds = session->s_mds; |
9abf82b8 | 2540 | int err = -ENOMEM; |
93cea5be | 2541 | struct ceph_pagelist *pagelist; |
20cb34ae | 2542 | struct ceph_reconnect_state recon_state; |
2f2dc053 | 2543 | |
34b6c855 | 2544 | pr_info("mds%d reconnect start\n", mds); |
2f2dc053 | 2545 | |
93cea5be SW |
2546 | pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); |
2547 | if (!pagelist) | |
2548 | goto fail_nopagelist; | |
2549 | ceph_pagelist_init(pagelist); | |
2550 | ||
b61c2763 | 2551 | reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); |
a79832f2 | 2552 | if (!reply) |
93cea5be | 2553 | goto fail_nomsg; |
93cea5be | 2554 | |
34b6c855 SW |
2555 | mutex_lock(&session->s_mutex); |
2556 | session->s_state = CEPH_MDS_SESSION_RECONNECTING; | |
2557 | session->s_seq = 0; | |
2f2dc053 | 2558 | |
a53aab64 | 2559 | ceph_con_close(&session->s_con); |
34b6c855 | 2560 | ceph_con_open(&session->s_con, |
b7a9e5dd | 2561 | CEPH_ENTITY_TYPE_MDS, mds, |
34b6c855 | 2562 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); |
2f2dc053 | 2563 | |
34b6c855 SW |
2564 | /* replay unsafe requests */ |
2565 | replay_unsafe_requests(mdsc, session); | |
2f2dc053 SW |
2566 | |
2567 | down_read(&mdsc->snap_rwsem); | |
2568 | ||
2f2dc053 SW |
2569 | dout("session %p state %s\n", session, |
2570 | session_state_name(session->s_state)); | |
2571 | ||
e01a5946 SW |
2572 | /* drop old cap expires; we're about to reestablish that state */ |
2573 | discard_cap_releases(mdsc, session); | |
2574 | ||
2f2dc053 | 2575 | /* traverse this session's caps */ |
93cea5be SW |
2576 | err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps); |
2577 | if (err) | |
2578 | goto fail; | |
20cb34ae SW |
2579 | |
2580 | recon_state.pagelist = pagelist; | |
2581 | recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; | |
2582 | err = iterate_session_caps(session, encode_caps_cb, &recon_state); | |
2f2dc053 | 2583 | if (err < 0) |
9abf82b8 | 2584 | goto fail; |
2f2dc053 SW |
2585 | |
2586 | /* | |
2587 | * Snaprealms: we provide the mds with the ino, seq (version), and | |
2588 | * parent for all of our realms. If the mds has any newer info, | |
2589 | * it will tell us. | |
2590 | */ | |
a105f00c SW |
2591 | for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { |
2592 | struct ceph_snap_realm *realm = | |
2593 | rb_entry(p, struct ceph_snap_realm, node); | |
93cea5be | 2594 | struct ceph_mds_snaprealm_reconnect sr_rec; |
2f2dc053 SW |
2595 | |
2596 | dout(" adding snap realm %llx seq %lld parent %llx\n", | |
2597 | realm->ino, realm->seq, realm->parent_ino); | |
93cea5be SW |
2598 | sr_rec.ino = cpu_to_le64(realm->ino); |
2599 | sr_rec.seq = cpu_to_le64(realm->seq); | |
2600 | sr_rec.parent = cpu_to_le64(realm->parent_ino); | |
2601 | err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); | |
2602 | if (err) | |
2603 | goto fail; | |
2f2dc053 | 2604 | } |
2f2dc053 | 2605 | |
20cb34ae SW |
2606 | if (recon_state.flock) |
2607 | reply->hdr.version = cpu_to_le16(2); | |
ebf18f47 AE |
2608 | if (pagelist->length) { |
2609 | /* set up outbound data if we have any */ | |
2610 | reply->hdr.data_len = cpu_to_le32(pagelist->length); | |
90af3602 | 2611 | ceph_msg_data_add_pagelist(reply, pagelist); |
ebf18f47 | 2612 | } |
2f2dc053 SW |
2613 | ceph_con_send(&session->s_con, reply); |
2614 | ||
9abf82b8 SW |
2615 | mutex_unlock(&session->s_mutex); |
2616 | ||
2617 | mutex_lock(&mdsc->mutex); | |
2618 | __wake_requests(mdsc, &session->s_waiting); | |
2619 | mutex_unlock(&mdsc->mutex); | |
2620 | ||
2f2dc053 | 2621 | up_read(&mdsc->snap_rwsem); |
2f2dc053 SW |
2622 | return; |
2623 | ||
93cea5be | 2624 | fail: |
2f2dc053 | 2625 | ceph_msg_put(reply); |
9abf82b8 SW |
2626 | up_read(&mdsc->snap_rwsem); |
2627 | mutex_unlock(&session->s_mutex); | |
93cea5be SW |
2628 | fail_nomsg: |
2629 | ceph_pagelist_release(pagelist); | |
2630 | kfree(pagelist); | |
2631 | fail_nopagelist: | |
9abf82b8 | 2632 | pr_err("error %d preparing reconnect for mds%d\n", err, mds); |
9abf82b8 | 2633 | return; |
2f2dc053 SW |
2634 | } |
2635 | ||
2636 | ||
2637 | /* | |
2638 | * compare old and new mdsmaps, kicking requests | |
2639 | * and closing out old connections as necessary | |
2640 | * | |
2641 | * called under mdsc->mutex. | |
2642 | */ | |
2643 | static void check_new_map(struct ceph_mds_client *mdsc, | |
2644 | struct ceph_mdsmap *newmap, | |
2645 | struct ceph_mdsmap *oldmap) | |
2646 | { | |
2647 | int i; | |
2648 | int oldstate, newstate; | |
2649 | struct ceph_mds_session *s; | |
2650 | ||
2651 | dout("check_new_map new %u old %u\n", | |
2652 | newmap->m_epoch, oldmap->m_epoch); | |
2653 | ||
2654 | for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { | |
2655 | if (mdsc->sessions[i] == NULL) | |
2656 | continue; | |
2657 | s = mdsc->sessions[i]; | |
2658 | oldstate = ceph_mdsmap_get_state(oldmap, i); | |
2659 | newstate = ceph_mdsmap_get_state(newmap, i); | |
2660 | ||
0deb01c9 | 2661 | dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", |
2f2dc053 | 2662 | i, ceph_mds_state_name(oldstate), |
0deb01c9 | 2663 | ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", |
2f2dc053 | 2664 | ceph_mds_state_name(newstate), |
0deb01c9 | 2665 | ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", |
2f2dc053 SW |
2666 | session_state_name(s->s_state)); |
2667 | ||
3e8f43a0 YZ |
2668 | if (i >= newmap->m_max_mds || |
2669 | memcmp(ceph_mdsmap_get_addr(oldmap, i), | |
2f2dc053 SW |
2670 | ceph_mdsmap_get_addr(newmap, i), |
2671 | sizeof(struct ceph_entity_addr))) { | |
2672 | if (s->s_state == CEPH_MDS_SESSION_OPENING) { | |
2673 | /* the session never opened, just close it | |
2674 | * out now */ | |
2675 | __wake_requests(mdsc, &s->s_waiting); | |
2600d2dd | 2676 | __unregister_session(mdsc, s); |
2f2dc053 SW |
2677 | } else { |
2678 | /* just close it */ | |
2679 | mutex_unlock(&mdsc->mutex); | |
2680 | mutex_lock(&s->s_mutex); | |
2681 | mutex_lock(&mdsc->mutex); | |
2682 | ceph_con_close(&s->s_con); | |
2683 | mutex_unlock(&s->s_mutex); | |
2684 | s->s_state = CEPH_MDS_SESSION_RESTARTING; | |
2685 | } | |
2686 | ||
2687 | /* kick any requests waiting on the recovering mds */ | |
29790f26 | 2688 | kick_requests(mdsc, i); |
2f2dc053 SW |
2689 | } else if (oldstate == newstate) { |
2690 | continue; /* nothing new with this mds */ | |
2691 | } | |
2692 | ||
2693 | /* | |
2694 | * send reconnect? | |
2695 | */ | |
2696 | if (s->s_state == CEPH_MDS_SESSION_RESTARTING && | |
34b6c855 SW |
2697 | newstate >= CEPH_MDS_STATE_RECONNECT) { |
2698 | mutex_unlock(&mdsc->mutex); | |
2699 | send_mds_reconnect(mdsc, s); | |
2700 | mutex_lock(&mdsc->mutex); | |
2701 | } | |
2f2dc053 SW |
2702 | |
2703 | /* | |
29790f26 | 2704 | * kick request on any mds that has gone active. |
2f2dc053 SW |
2705 | */ |
2706 | if (oldstate < CEPH_MDS_STATE_ACTIVE && | |
2707 | newstate >= CEPH_MDS_STATE_ACTIVE) { | |
29790f26 SW |
2708 | if (oldstate != CEPH_MDS_STATE_CREATING && |
2709 | oldstate != CEPH_MDS_STATE_STARTING) | |
2710 | pr_info("mds%d recovery completed\n", s->s_mds); | |
2711 | kick_requests(mdsc, i); | |
2f2dc053 | 2712 | ceph_kick_flushing_caps(mdsc, s); |
0dc2570f | 2713 | wake_up_session_caps(s, 1); |
2f2dc053 SW |
2714 | } |
2715 | } | |
cb170a22 SW |
2716 | |
2717 | for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { | |
2718 | s = mdsc->sessions[i]; | |
2719 | if (!s) | |
2720 | continue; | |
2721 | if (!ceph_mdsmap_is_laggy(newmap, i)) | |
2722 | continue; | |
2723 | if (s->s_state == CEPH_MDS_SESSION_OPEN || | |
2724 | s->s_state == CEPH_MDS_SESSION_HUNG || | |
2725 | s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
2726 | dout(" connecting to export targets of laggy mds%d\n", | |
2727 | i); | |
2728 | __open_export_target_sessions(mdsc, s); | |
2729 | } | |
2730 | } | |
2f2dc053 SW |
2731 | } |
2732 | ||
2733 | ||
2734 | ||
2735 | /* | |
2736 | * leases | |
2737 | */ | |
2738 | ||
2739 | /* | |
2740 | * caller must hold session s_mutex, dentry->d_lock | |
2741 | */ | |
2742 | void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) | |
2743 | { | |
2744 | struct ceph_dentry_info *di = ceph_dentry(dentry); | |
2745 | ||
2746 | ceph_put_mds_session(di->lease_session); | |
2747 | di->lease_session = NULL; | |
2748 | } | |
2749 | ||
2600d2dd SW |
2750 | static void handle_lease(struct ceph_mds_client *mdsc, |
2751 | struct ceph_mds_session *session, | |
2752 | struct ceph_msg *msg) | |
2f2dc053 | 2753 | { |
3d14c5d2 | 2754 | struct super_block *sb = mdsc->fsc->sb; |
2f2dc053 | 2755 | struct inode *inode; |
2f2dc053 SW |
2756 | struct dentry *parent, *dentry; |
2757 | struct ceph_dentry_info *di; | |
2600d2dd | 2758 | int mds = session->s_mds; |
2f2dc053 | 2759 | struct ceph_mds_lease *h = msg->front.iov_base; |
1e5ea23d | 2760 | u32 seq; |
2f2dc053 | 2761 | struct ceph_vino vino; |
2f2dc053 SW |
2762 | struct qstr dname; |
2763 | int release = 0; | |
2764 | ||
2f2dc053 SW |
2765 | dout("handle_lease from mds%d\n", mds); |
2766 | ||
2767 | /* decode */ | |
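/* wire format: struct ceph_mds_lease, le32 name length, name bytes */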
2768 | if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) | |
2769 | goto bad; | |
2770 | vino.ino = le64_to_cpu(h->ino); | |
2771 | vino.snap = CEPH_NOSNAP; | |
1e5ea23d | 2772 | seq = le32_to_cpu(h->seq); |
2f2dc053 SW |
2773 | dname.name = (void *)h + sizeof(*h) + sizeof(u32); |
2774 | dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); | |
2775 | if (dname.len != get_unaligned_le32(h+1)) | |
2776 | goto bad; | |
2777 | ||
2f2dc053 SW |
2778 | mutex_lock(&session->s_mutex); |
2779 | session->s_seq++; | |
2780 | ||
2781 | /* lookup inode */ | |
2782 | inode = ceph_find_inode(sb, vino); | |
2f90b852 SW |
2783 | dout("handle_lease %s, ino %llx %p %.*s\n", |
2784 | ceph_lease_op_name(h->action), vino.ino, inode, | |
1e5ea23d | 2785 | dname.len, dname.name); |
2f2dc053 SW |
2786 | if (inode == NULL) { |
2787 | dout("handle_lease no inode %llx\n", vino.ino); | |
2788 | goto release; | |
2789 | } | |
2f2dc053 SW |
2790 | |
2791 | /* dentry */ | |
2792 | parent = d_find_alias(inode); | |
2793 | if (!parent) { | |
2794 | dout("no parent dentry on inode %p\n", inode); | |
2795 | WARN_ON(1); | |
2796 | goto release; /* hrm... */ | |
2797 | } | |
2798 | dname.hash = full_name_hash(dname.name, dname.len); | |
2799 | dentry = d_lookup(parent, &dname); | |
2800 | dput(parent); | |
2801 | if (!dentry) | |
2802 | goto release; | |
2803 | ||
2804 | spin_lock(&dentry->d_lock); | |
2805 | di = ceph_dentry(dentry); | |
2806 | switch (h->action) { | |
2807 | case CEPH_MDS_LEASE_REVOKE: | |
3d8eb7a9 | 2808 | if (di->lease_session == session) { |
1e5ea23d SW |
2809 | if (ceph_seq_cmp(di->lease_seq, seq) > 0) |
2810 | h->seq = cpu_to_le32(di->lease_seq); | |
2f2dc053 SW |
2811 | __ceph_mdsc_drop_dentry_lease(dentry); |
2812 | } | |
2813 | release = 1; | |
2814 | break; | |
2815 | ||
2816 | case CEPH_MDS_LEASE_RENEW: | |
3d8eb7a9 | 2817 | if (di->lease_session == session && |
2f2dc053 SW |
2818 | di->lease_gen == session->s_cap_gen && |
2819 | di->lease_renew_from && | |
2820 | di->lease_renew_after == 0) { | |
2821 | unsigned long duration = | |
2822 | le32_to_cpu(h->duration_ms) * HZ / 1000; | |
2823 | ||
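/*
 * Lease accepted: d_time is the hard expiry; a renewal is
 * attempted once half of the duration has elapsed.
 */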
1e5ea23d | 2824 | di->lease_seq = seq; |
2f2dc053 SW |
2825 | dentry->d_time = di->lease_renew_from + duration; |
2826 | di->lease_renew_after = di->lease_renew_from + | |
2827 | (duration >> 1); | |
2828 | di->lease_renew_from = 0; | |
2829 | } | |
2830 | break; | |
2831 | } | |
2832 | spin_unlock(&dentry->d_lock); | |
2833 | dput(dentry); | |
2834 | ||
2835 | if (!release) | |
2836 | goto out; | |
2837 | ||
2838 | release: | |
2839 | /* let's just reuse the same message */ | |
2840 | h->action = CEPH_MDS_LEASE_REVOKE_ACK; | |
2841 | ceph_msg_get(msg); | |
2842 | ceph_con_send(&session->s_con, msg); | |
2843 | ||
2844 | out: | |
2845 | iput(inode); | |
2846 | mutex_unlock(&session->s_mutex); | |
2f2dc053 SW |
2847 | return; |
2848 | ||
2849 | bad: | |
2850 | pr_err("corrupt lease message\n"); | |
9ec7cab1 | 2851 | ceph_msg_dump(msg); |
2f2dc053 SW |
2852 | } |
2853 | ||
2854 | void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, | |
2855 | struct inode *inode, | |
2856 | struct dentry *dentry, char action, | |
2857 | u32 seq) | |
2858 | { | |
2859 | struct ceph_msg *msg; | |
2860 | struct ceph_mds_lease *lease; | |
2861 | int len = sizeof(*lease) + sizeof(u32); | |
2862 | int dnamelen = 0; | |
2863 | ||
2864 | dout("lease_send_msg inode %p dentry %p %s to mds%d\n", | |
2865 | inode, dentry, ceph_lease_op_name(action), session->s_mds); | |
2866 | dnamelen = dentry->d_name.len; | |
2867 | len += dnamelen; | |
2868 | ||
b61c2763 | 2869 | msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); |
a79832f2 | 2870 | if (!msg) |
2f2dc053 SW |
2871 | return; |
2872 | lease = msg->front.iov_base; | |
2873 | lease->action = action; | |
2f2dc053 SW |
2874 | lease->ino = cpu_to_le64(ceph_vino(inode).ino); |
2875 | lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); | |
2876 | lease->seq = cpu_to_le32(seq); | |
2877 | put_unaligned_le32(dnamelen, lease + 1); | |
2878 | memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); | |
2879 | ||
2880 | /* | |
2881 | * if this is a preemptive lease RELEASE, no need to | |
2882 | * flush request stream, since the actual request will | |
2883 | * soon follow. | |
2884 | */ | |
2885 | msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); | |
2886 | ||
2887 | ceph_con_send(&session->s_con, msg); | |
2888 | } | |
2889 | ||
2890 | /* | |
2891 | * Preemptively release a lease we expect to invalidate anyway. | |
2892 | * Both @inode and @dentry are required (the BUG_ONs below enforce it). | |
2893 | */ | |
2894 | void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, | |
2f90b852 | 2895 | struct dentry *dentry) |
2f2dc053 SW |
2896 | { |
2897 | struct ceph_dentry_info *di; | |
2898 | struct ceph_mds_session *session; | |
2899 | u32 seq; | |
2900 | ||
2901 | BUG_ON(inode == NULL); | |
2902 | BUG_ON(dentry == NULL); | |
2f2dc053 SW |
2903 | |
2904 | /* is dentry lease valid? */ | |
2905 | spin_lock(&dentry->d_lock); | |
2906 | di = ceph_dentry(dentry); | |
2907 | if (!di || !di->lease_session || | |
2908 | di->lease_session->s_mds < 0 || | |
2909 | di->lease_gen != di->lease_session->s_cap_gen || | |
2910 | !time_before(jiffies, dentry->d_time)) { | |
2911 | dout("lease_release inode %p dentry %p -- " | |
2f90b852 SW |
2912 | "no lease\n", |
2913 | inode, dentry); | |
2f2dc053 SW |
2914 | spin_unlock(&dentry->d_lock); |
2915 | return; | |
2916 | } | |
2917 | ||
2918 | /* we do have a lease on this dentry; note mds and seq */ | |
2919 | session = ceph_get_mds_session(di->lease_session); | |
2920 | seq = di->lease_seq; | |
2921 | __ceph_mdsc_drop_dentry_lease(dentry); | |
2922 | spin_unlock(&dentry->d_lock); | |
2923 | ||
2f90b852 SW |
2924 | dout("lease_release inode %p dentry %p to mds%d\n", |
2925 | inode, dentry, session->s_mds); | |
2f2dc053 SW |
2926 | ceph_mdsc_lease_send_msg(session, inode, dentry, |
2927 | CEPH_MDS_LEASE_RELEASE, seq); | |
2928 | ceph_put_mds_session(session); | |
2929 | } | |
2930 | ||
2931 | /* | |
2932 | * drop all leases (and dentry refs) in preparation for umount | |
2933 | */ | |
2934 | static void drop_leases(struct ceph_mds_client *mdsc) | |
2935 | { | |
2936 | int i; | |
2937 | ||
2938 | dout("drop_leases\n"); | |
2939 | mutex_lock(&mdsc->mutex); | |
2940 | for (i = 0; i < mdsc->max_sessions; i++) { | |
2941 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
2942 | if (!s) | |
2943 | continue; | |
2944 | mutex_unlock(&mdsc->mutex); | |
2945 | mutex_lock(&s->s_mutex); | |
2946 | mutex_unlock(&s->s_mutex); | |
2947 | ceph_put_mds_session(s); | |
2948 | mutex_lock(&mdsc->mutex); | |
2949 | } | |
2950 | mutex_unlock(&mdsc->mutex); | |
2951 | } | |
2952 | ||
2953 | ||
2954 | ||
2955 | /* | |
2956 | * delayed work -- periodically trim expired leases, renew caps with mds | |
2957 | */ | |
2958 | static void schedule_delayed(struct ceph_mds_client *mdsc) | |
2959 | { | |
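	/*
	 * re-arm roughly every 5 seconds; round_jiffies_relative()
	 * lets the wakeup coalesce with other timers
	 */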
2960 | int delay = 5; | |
2961 | unsigned hz = round_jiffies_relative(HZ * delay); | |
2962 | schedule_delayed_work(&mdsc->delayed_work, hz); | |
2963 | } | |
2964 | ||
2965 | static void delayed_work(struct work_struct *work) | |
2966 | { | |
2967 | int i; | |
2968 | struct ceph_mds_client *mdsc = | |
2969 | container_of(work, struct ceph_mds_client, delayed_work.work); | |
2970 | int renew_interval; | |
2971 | int renew_caps; | |
2972 | ||
2973 | dout("mdsc delayed_work\n"); | |
afcdaea3 | 2974 | ceph_check_delayed_caps(mdsc); |
2f2dc053 SW |
2975 | |
2976 | mutex_lock(&mdsc->mutex); | |
2977 | renew_interval = mdsc->mdsmap->m_session_timeout >> 2; | |
2978 | renew_caps = time_after_eq(jiffies, HZ*renew_interval + | |
2979 | mdsc->last_renew_caps); | |
2980 | if (renew_caps) | |
2981 | mdsc->last_renew_caps = jiffies; | |
2982 | ||
2983 | for (i = 0; i < mdsc->max_sessions; i++) { | |
2984 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
2985 | if (s == NULL) | |
2986 | continue; | |
2987 | if (s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
2988 | dout("resending session close request for mds%d\n", | |
2989 | s->s_mds); | |
2990 | request_close_session(mdsc, s); | |
2991 | ceph_put_mds_session(s); | |
2992 | continue; | |
2993 | } | |
2994 | if (s->s_ttl && time_after(jiffies, s->s_ttl)) { | |
2995 | if (s->s_state == CEPH_MDS_SESSION_OPEN) { | |
2996 | s->s_state = CEPH_MDS_SESSION_HUNG; | |
2997 | pr_info("mds%d hung\n", s->s_mds); | |
2998 | } | |
2999 | } | |
3000 | if (s->s_state < CEPH_MDS_SESSION_OPEN) { | |
3001 | /* this mds is failed or recovering, just wait */ | |
3002 | ceph_put_mds_session(s); | |
3003 | continue; | |
3004 | } | |
3005 | mutex_unlock(&mdsc->mutex); | |
3006 | ||
3007 | mutex_lock(&s->s_mutex); | |
3008 | if (renew_caps) | |
3009 | send_renew_caps(mdsc, s); | |
3010 | else | |
3011 | ceph_con_keepalive(&s->s_con); | |
ee6b272b | 3012 | ceph_add_cap_releases(mdsc, s); |
aab53dd9 SW |
3013 | if (s->s_state == CEPH_MDS_SESSION_OPEN || |
3014 | s->s_state == CEPH_MDS_SESSION_HUNG) | |
3d7ded4d | 3015 | ceph_send_cap_releases(mdsc, s); |
2f2dc053 SW |
3016 | mutex_unlock(&s->s_mutex); |
3017 | ceph_put_mds_session(s); | |
3018 | ||
3019 | mutex_lock(&mdsc->mutex); | |
3020 | } | |
3021 | mutex_unlock(&mdsc->mutex); | |
3022 | ||
3023 | schedule_delayed(mdsc); | |
3024 | } | |
3025 | ||
3d14c5d2 | 3026 | int ceph_mdsc_init(struct ceph_fs_client *fsc) |
2f2dc053 | 3027 | |
2f2dc053 | 3028 | { |
3d14c5d2 YS |
3029 | struct ceph_mds_client *mdsc; |
3030 | ||
3031 | mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); | |
3032 | if (!mdsc) | |
3033 | return -ENOMEM; | |
3034 | mdsc->fsc = fsc; | |
3035 | fsc->mdsc = mdsc; | |
2f2dc053 SW |
3036 | mutex_init(&mdsc->mutex); |
3037 | mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); | |
2d06eeb8 CR |
3038 | if (mdsc->mdsmap == NULL) |
3039 | return -ENOMEM; | |
3040 | ||
2f2dc053 | 3041 | init_completion(&mdsc->safe_umount_waiters); |
f3c60c59 | 3042 | init_waitqueue_head(&mdsc->session_close_wq); |
2f2dc053 SW |
3043 | INIT_LIST_HEAD(&mdsc->waiting_for_map); |
3044 | mdsc->sessions = NULL; | |
3045 | mdsc->max_sessions = 0; | |
3046 | mdsc->stopping = 0; | |
3047 | init_rwsem(&mdsc->snap_rwsem); | |
a105f00c | 3048 | mdsc->snap_realms = RB_ROOT; |
2f2dc053 SW |
3049 | INIT_LIST_HEAD(&mdsc->snap_empty); |
3050 | spin_lock_init(&mdsc->snap_empty_lock); | |
3051 | mdsc->last_tid = 0; | |
44ca18f2 | 3052 | mdsc->request_tree = RB_ROOT; |
2f2dc053 SW |
3053 | INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); |
3054 | mdsc->last_renew_caps = jiffies; | |
3055 | INIT_LIST_HEAD(&mdsc->cap_delay_list); | |
3056 | spin_lock_init(&mdsc->cap_delay_lock); | |
3057 | INIT_LIST_HEAD(&mdsc->snap_flush_list); | |
3058 | spin_lock_init(&mdsc->snap_flush_lock); | |
3059 | mdsc->cap_flush_seq = 0; | |
3060 | INIT_LIST_HEAD(&mdsc->cap_dirty); | |
db354052 | 3061 | INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); |
2f2dc053 SW |
3062 | mdsc->num_cap_flushing = 0; |
3063 | spin_lock_init(&mdsc->cap_dirty_lock); | |
3064 | init_waitqueue_head(&mdsc->cap_flushing_wq); | |
3065 | spin_lock_init(&mdsc->dentry_lru_lock); | |
3066 | INIT_LIST_HEAD(&mdsc->dentry_lru); | |
2d06eeb8 | 3067 | |
37151668 | 3068 | ceph_caps_init(mdsc); |
3d14c5d2 | 3069 | ceph_adjust_min_caps(mdsc, fsc->min_caps); |
37151668 | 3070 | |
5f44f142 | 3071 | return 0; |
2f2dc053 SW |
3072 | } |
3073 | ||
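ceph_mdsc_init() allocates the client struct and then the mdsmap, so the second failure path has to free the first allocation (see the fix above). A hedged sketch of the general goto-unwind idiom that multi-step init functions like this commonly use; every name below is invented for illustration.

/*
 * Sketch: goto-based unwind for multi-step init, releasing resources
 * in reverse order of acquisition.  Illustrative names only.
 */
#include <stdlib.h>

struct map { int dummy; };
struct sessions { int dummy; };
struct client { struct map *map; struct sessions *sessions; };

static struct client *client_init(void)
{
	struct client *c = calloc(1, sizeof(*c));
	if (!c)
		return NULL;
	c->map = calloc(1, sizeof(*c->map));
	if (!c->map)
		goto fail_client;	/* nothing else to undo yet */
	c->sessions = calloc(1, sizeof(*c->sessions));
	if (!c->sessions)
		goto fail_map;
	return c;

fail_map:
	free(c->map);
fail_client:
	free(c);
	return NULL;
}

int main(void)
{
	struct client *c = client_init();
	if (c) {
		free(c->sessions);
		free(c->map);
		free(c);
	}
	return 0;
}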
3074 | /* | |
3075 | * Wait for safe replies on outstanding mds requests. If we time out, drop | |
3076 | * all requests from the tree to avoid dangling dentry refs. | |
3077 | */ | |
3078 | static void wait_requests(struct ceph_mds_client *mdsc) | |
3079 | { | |
3080 | struct ceph_mds_request *req; | |
3d14c5d2 | 3081 | struct ceph_fs_client *fsc = mdsc->fsc; |
2f2dc053 SW |
3082 | |
3083 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3084 | if (__get_oldest_req(mdsc)) { |
2f2dc053 | 3085 | mutex_unlock(&mdsc->mutex); |
44ca18f2 | 3086 | |
2f2dc053 SW |
3087 | dout("wait_requests waiting for requests\n"); |
3088 | wait_for_completion_timeout(&mdsc->safe_umount_waiters, | |
3d14c5d2 | 3089 | fsc->client->options->mount_timeout * HZ); |
2f2dc053 SW |
3090 | |
3091 | /* tear down remaining requests */ | |
44ca18f2 SW |
3092 | mutex_lock(&mdsc->mutex); |
3093 | while ((req = __get_oldest_req(mdsc))) { | |
2f2dc053 SW |
3094 | dout("wait_requests timed out on tid %llu\n", |
3095 | req->r_tid); | |
44ca18f2 | 3096 | __unregister_request(mdsc, req); |
2f2dc053 SW |
3097 | } |
3098 | } | |
3099 | mutex_unlock(&mdsc->mutex); | |
3100 | dout("wait_requests done\n"); | |
3101 | } | |
3102 | ||
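wait_requests() sleeps on a completion with a bounded timeout and only tears the remaining requests down if the timeout fires. A rough userspace analogue of that wait, using POSIX primitives rather than the kernel's completion API; the one-second timeout and all names here are illustrative.

/*
 * Sketch: wait-for-completion-with-timeout using pthreads.
 * Returns true if completed, false if the timeout expired.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static bool wait_completion_timeout(struct completion *c, int secs)
{
	struct timespec ts;
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;

	pthread_mutex_lock(&c->lock);
	while (!c->done) {
		if (pthread_cond_timedwait(&c->cond, &c->lock, &ts) != 0)
			break;			/* ETIMEDOUT */
	}
	bool done = c->done;
	pthread_mutex_unlock(&c->lock);
	return done;
}

int main(void)
{
	struct completion c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
	};
	/* nobody completes it, so this times out after ~1s */
	printf("%s\n", wait_completion_timeout(&c, 1) ?
	       "completed" : "timed out, tearing down remaining requests");
	return 0;
}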
3103 | /* | |
3104 | * called before mount is ro, and before dentries are torn down. | |
3105 | * (hmm, does this still race with new lookups?) | |
3106 | */ | |
3107 | void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) | |
3108 | { | |
3109 | dout("pre_umount\n"); | |
3110 | mdsc->stopping = 1; | |
3111 | ||
3112 | drop_leases(mdsc); | |
afcdaea3 | 3113 | ceph_flush_dirty_caps(mdsc); |
2f2dc053 | 3114 | wait_requests(mdsc); |
17c688c3 SW |
3115 | |
3116 | /* | |
3117 | * wait for reply handlers to drop their request refs and | |
3118 | * their inode/dcache refs | |
3119 | */ | |
3120 | ceph_msgr_flush(); | |
2f2dc053 SW |
3121 | } |
3122 | ||
3123 | /* | |
3124 | * wait for all MDS write requests to flush. | |
3125 | */ | |
3126 | static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) | |
3127 | { | |
80fc7314 | 3128 | struct ceph_mds_request *req = NULL, *nextreq; |
44ca18f2 | 3129 | struct rb_node *n; |
2f2dc053 SW |
3130 | |
3131 | mutex_lock(&mdsc->mutex); | |
3132 | dout("wait_unsafe_requests want %lld\n", want_tid); | |
80fc7314 | 3133 | restart: |
44ca18f2 SW |
3134 | req = __get_oldest_req(mdsc); |
3135 | while (req && req->r_tid <= want_tid) { | |
80fc7314 SW |
3136 | /* find next request */ |
3137 | n = rb_next(&req->r_node); | |
3138 | if (n) | |
3139 | nextreq = rb_entry(n, struct ceph_mds_request, r_node); | |
3140 | else | |
3141 | nextreq = NULL; | |
44ca18f2 SW |
3142 | if ((req->r_op & CEPH_MDS_OP_WRITE)) { |
3143 | /* write op */ | |
3144 | ceph_mdsc_get_request(req); | |
80fc7314 SW |
3145 | if (nextreq) |
3146 | ceph_mdsc_get_request(nextreq); | |
44ca18f2 SW |
3147 | mutex_unlock(&mdsc->mutex); |
3148 | dout("wait_unsafe_requests wait on %llu (want %llu)\n", | |
3149 | req->r_tid, want_tid); | |
3150 | wait_for_completion(&req->r_safe_completion); | |
3151 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3152 | ceph_mdsc_put_request(req); |
80fc7314 SW |
3153 | if (!nextreq) |
3154 | break; /* next didn't exist, so we're done */ | |
3155 | if (RB_EMPTY_NODE(&nextreq->r_node)) { | |
3156 | /* next request was removed from tree */ | |
3157 | ceph_mdsc_put_request(nextreq); | |
3158 | goto restart; | |
3159 | } | |
3160 | ceph_mdsc_put_request(nextreq); /* won't go away */ | |
44ca18f2 | 3161 | } |
80fc7314 | 3162 | req = nextreq; |
2f2dc053 SW |
3163 | } |
3164 | mutex_unlock(&mdsc->mutex); | |
3165 | dout("wait_unsafe_requests done\n"); | |
3166 | } | |
3167 | ||
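The traversal above must drop mdsc->mutex to sleep, so it pins a reference on the *next* request first and revalidates it after waking, restarting the scan if that request was unregistered in the meantime (the RB_EMPTY_NODE() test). A simplified, single-threaded sketch of the same pattern, with a plain next pointer and an unlinked flag standing in for the rbtree; everything here is illustrative.

/* Sketch: pin-next-then-sleep traversal with restart on removal. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req {
	unsigned long long tid;
	int refs;		/* get/put_request() analogue */
	bool unlinked;		/* set when removed (~RB_EMPTY_NODE) */
	bool is_write;
	struct req *next;	/* stands in for rb_next() */
};

static void get_req(struct req *r)   { r->refs++; }
static void put_req(struct req *r)   { r->refs--; }
static void wait_safe(struct req *r) { printf("wait on %llu\n", r->tid); }

static void wait_unsafe(struct req *head, unsigned long long want_tid)
{
restart:
	for (struct req *r = head; r && r->tid <= want_tid; ) {
		struct req *next = r->next;
		if (!r->is_write) {
			r = next;
			continue;
		}
		get_req(r);
		if (next)
			get_req(next);	/* pin it across the sleep */
		/* (lock dropped here in the real code) */
		wait_safe(r);
		/* (lock retaken) */
		put_req(r);
		if (!next)
			break;		/* no next request; done */
		if (next->unlinked) {	/* removed while we slept */
			put_req(next);
			goto restart;	/* re-scan from the oldest */
		}
		put_req(next);		/* still linked; keep going */
		r = next;
	}
}

int main(void)
{
	struct req b = { 7, 0, false, true, NULL };
	struct req a = { 3, 0, false, true, &b };
	wait_unsafe(&a, 10);
	return 0;
}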
3168 | void ceph_mdsc_sync(struct ceph_mds_client *mdsc) | |
3169 | { | |
3170 | u64 want_tid, want_flush; | |
3171 | ||
3d14c5d2 | 3172 | if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) |
56b7cf95 SW |
3173 | return; |
3174 | ||
2f2dc053 SW |
3175 | dout("sync\n"); |
3176 | mutex_lock(&mdsc->mutex); | |
3177 | want_tid = mdsc->last_tid; | |
3178 | want_flush = mdsc->cap_flush_seq; | |
3179 | mutex_unlock(&mdsc->mutex); | |
3180 | dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); | |
3181 | ||
afcdaea3 | 3182 | ceph_flush_dirty_caps(mdsc); |
2f2dc053 SW |
3183 | |
3184 | wait_unsafe_requests(mdsc, want_tid); | |
3185 | wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush)); | |
3186 | } | |
3187 | ||
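ceph_mdsc_sync() is a snapshot barrier: it records last_tid and cap_flush_seq under the mutex and then waits only for work up to that snapshot, so operations issued afterwards are deliberately not waited on. A toy sketch of that semantics; the counters and the busy loop are stand-ins for the real waitqueue and completion machinery.

/* Sketch only: snapshot-then-wait sync barrier. */
#include <stdio.h>

static unsigned long long last_tid = 5;		/* most recent op issued */
static unsigned long long completed_tid = 2;	/* durable so far */

static void sync_barrier(void)
{
	/* snapshot taken under mdsc->mutex in the real code */
	unsigned long long want_tid = last_tid;

	/* ops issued after this point get larger tids and are NOT waited on */
	while (completed_tid < want_tid)
		completed_tid++;	/* stand-in for sleeping on progress */

	printf("synced through tid %llu\n", want_tid);
}

int main(void)
{
	sync_barrier();
	return 0;
}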
f3c60c59 SW |
3188 | /* |
3189 | * true if all sessions are closed, or this is a forced unmount | |
3190 | */ | |
7fd7d101 | 3191 | static bool done_closing_sessions(struct ceph_mds_client *mdsc) |
f3c60c59 SW |
3192 | { |
3193 | int i, n = 0; | |
3194 | ||
3d14c5d2 | 3195 | if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) |
f3c60c59 SW |
3196 | return true; |
3197 | ||
3198 | mutex_lock(&mdsc->mutex); | |
3199 | for (i = 0; i < mdsc->max_sessions; i++) | |
3200 | if (mdsc->sessions[i]) | |
3201 | n++; | |
3202 | mutex_unlock(&mdsc->mutex); | |
3203 | return n == 0; | |
3204 | } | |
2f2dc053 SW |
3205 | |
3206 | /* | |
3207 | * called after the superblock has been made read-only. | |
3208 | */ | |
3209 | void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) | |
3210 | { | |
3211 | struct ceph_mds_session *session; | |
3212 | int i; | |
3d14c5d2 YS |
3213 | struct ceph_fs_client *fsc = mdsc->fsc; |
3214 | unsigned long timeout = fsc->client->options->mount_timeout * HZ; | |
2f2dc053 SW |
3215 | |
3216 | dout("close_sessions\n"); | |
3217 | ||
2f2dc053 | 3218 | /* close sessions */ |
f3c60c59 SW |
3219 | mutex_lock(&mdsc->mutex); |
3220 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3221 | session = __ceph_lookup_mds_session(mdsc, i); | |
3222 | if (!session) | |
3223 | continue; | |
2f2dc053 | 3224 | mutex_unlock(&mdsc->mutex); |
f3c60c59 SW |
3225 | mutex_lock(&session->s_mutex); |
3226 | __close_session(mdsc, session); | |
3227 | mutex_unlock(&session->s_mutex); | |
3228 | ceph_put_mds_session(session); | |
2f2dc053 SW |
3229 | mutex_lock(&mdsc->mutex); |
3230 | } | |
f3c60c59 SW |
3231 | mutex_unlock(&mdsc->mutex); |
3232 | ||
3233 | dout("waiting for sessions to close\n"); | |
3234 | wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc), | |
3235 | timeout); | |
2f2dc053 SW |
3236 | |
3237 | /* tear down remaining sessions */ | |
f3c60c59 | 3238 | mutex_lock(&mdsc->mutex); |
2f2dc053 SW |
3239 | for (i = 0; i < mdsc->max_sessions; i++) { |
3240 | if (mdsc->sessions[i]) { | |
3241 | session = get_session(mdsc->sessions[i]); | |
2600d2dd | 3242 | __unregister_session(mdsc, session); |
2f2dc053 SW |
3243 | mutex_unlock(&mdsc->mutex); |
3244 | mutex_lock(&session->s_mutex); | |
3245 | remove_session_caps(session); | |
3246 | mutex_unlock(&session->s_mutex); | |
3247 | ceph_put_mds_session(session); | |
3248 | mutex_lock(&mdsc->mutex); | |
3249 | } | |
3250 | } | |
2f2dc053 | 3251 | WARN_ON(!list_empty(&mdsc->cap_delay_list)); |
2f2dc053 SW |
3252 | mutex_unlock(&mdsc->mutex); |
3253 | ||
3254 | ceph_cleanup_empty_realms(mdsc); | |
3255 | ||
3256 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3257 | ||
3258 | dout("stopped\n"); | |
3259 | } | |
3260 | ||
3d14c5d2 | 3261 | static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
2f2dc053 SW |
3262 | { |
3263 | dout("stop\n"); | |
3264 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3265 | if (mdsc->mdsmap) | |
3266 | ceph_mdsmap_destroy(mdsc->mdsmap); | |
3267 | kfree(mdsc->sessions); | |
37151668 | 3268 | ceph_caps_finalize(mdsc); |
2f2dc053 SW |
3269 | } |
3270 | ||
3d14c5d2 YS |
3271 | void ceph_mdsc_destroy(struct ceph_fs_client *fsc) |
3272 | { | |
3273 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
3274 | ||
ef550f6f | 3275 | dout("mdsc_destroy %p\n", mdsc); |
3d14c5d2 | 3276 | ceph_mdsc_stop(mdsc); |
ef550f6f SW |
3277 | |
3278 | /* flush out any connection work with references to us */ | |
3279 | ceph_msgr_flush(); | |
3280 | ||
3d14c5d2 YS |
3281 | fsc->mdsc = NULL; |
3282 | kfree(mdsc); | |
ef550f6f | 3283 | dout("mdsc_destroy %p done\n", mdsc); |
3d14c5d2 YS |
3284 | } |
3285 | ||
2f2dc053 SW |
3286 | |
3287 | /* | |
3288 | * handle mds map update. | |
3289 | */ | |
3290 | void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) | |
3291 | { | |
3292 | u32 epoch; | |
3293 | u32 maplen; | |
3294 | void *p = msg->front.iov_base; | |
3295 | void *end = p + msg->front.iov_len; | |
3296 | struct ceph_mdsmap *newmap, *oldmap; | |
3297 | struct ceph_fsid fsid; | |
3298 | int err = -EINVAL; | |
3299 | ||
3300 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); | |
3301 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
3d14c5d2 | 3302 | if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) |
0743304d | 3303 | return; |
c89136ea SW |
3304 | epoch = ceph_decode_32(&p); |
3305 | maplen = ceph_decode_32(&p); | |
2f2dc053 SW |
3306 | dout("handle_map epoch %u len %d\n", epoch, (int)maplen); |
3307 | ||
3308 | /* do we need it? */ | |
3d14c5d2 | 3309 | ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); |
2f2dc053 SW |
3310 | mutex_lock(&mdsc->mutex); |
3311 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { | |
3312 | dout("handle_map epoch %u <= our %u\n", | |
3313 | epoch, mdsc->mdsmap->m_epoch); | |
3314 | mutex_unlock(&mdsc->mutex); | |
3315 | return; | |
3316 | } | |
3317 | ||
3318 | newmap = ceph_mdsmap_decode(&p, end); | |
3319 | if (IS_ERR(newmap)) { | |
3320 | err = PTR_ERR(newmap); | |
3321 | goto bad_unlock; | |
3322 | } | |
3323 | ||
3324 | /* swap into place */ | |
3325 | if (mdsc->mdsmap) { | |
3326 | oldmap = mdsc->mdsmap; | |
3327 | mdsc->mdsmap = newmap; | |
3328 | check_new_map(mdsc, newmap, oldmap); | |
3329 | ceph_mdsmap_destroy(oldmap); | |
3330 | } else { | |
3331 | mdsc->mdsmap = newmap; /* first mds map */ | |
3332 | } | |
3d14c5d2 | 3333 | mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; |
2f2dc053 SW |
3334 | |
3335 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
3336 | ||
3337 | mutex_unlock(&mdsc->mutex); | |
3338 | schedule_delayed(mdsc); | |
3339 | return; | |
3340 | ||
3341 | bad_unlock: | |
3342 | mutex_unlock(&mdsc->mutex); | |
3343 | bad: | |
3344 | pr_err("error decoding mdsmap %d\n", err); | |
3345 | return; | |
3346 | } | |
3347 | ||
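Map messages can be duplicated or arrive out of order, so ceph_mdsc_handle_map() installs a new map only when its epoch is strictly newer than the one it holds. A small sketch of just that staleness check, with locking, decoding, and freeing of the old map omitted; names are illustrative.

/* Sketch: epoch-gated map install, as in the handler above. */
#include <stdio.h>

struct mdsmap { unsigned int epoch; };

/* returns 1 if newmap was installed, 0 if it was stale */
static int maybe_install_map(struct mdsmap **cur, struct mdsmap *newmap)
{
	if (*cur && newmap->epoch <= (*cur)->epoch) {
		printf("epoch %u <= our %u, ignoring\n",
		       newmap->epoch, (*cur)->epoch);
		return 0;
	}
	*cur = newmap;	/* the real code swaps and destroys the old map */
	return 1;
}

int main(void)
{
	struct mdsmap old = { 4 }, stale = { 3 }, fresh = { 5 };
	struct mdsmap *cur = &old;
	maybe_install_map(&cur, &stale);	/* ignored */
	maybe_install_map(&cur, &fresh);	/* installed */
	return 0;
}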
3348 | static struct ceph_connection *con_get(struct ceph_connection *con) | |
3349 | { | |
3350 | struct ceph_mds_session *s = con->private; | |
3351 | ||
3352 | if (get_session(s)) { | |
2600d2dd | 3353 | dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref)); |
2f2dc053 SW |
3354 | return con; |
3355 | } | |
3356 | dout("mdsc con_get %p FAIL\n", s); | |
3357 | return NULL; | |
3358 | } | |
3359 | ||
3360 | static void con_put(struct ceph_connection *con) | |
3361 | { | |
3362 | struct ceph_mds_session *s = con->private; | |
3363 | ||
7d8e18a6 | 3364 | dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); |
2f2dc053 SW |
3365 | ceph_put_mds_session(s); |
3366 | } | |
3367 | ||
3368 | /* | |
3369 | * if the client is unresponsive for long enough, the mds will kill | |
3370 | * the session entirely. | |
3371 | */ | |
3372 | static void peer_reset(struct ceph_connection *con) | |
3373 | { | |
3374 | struct ceph_mds_session *s = con->private; | |
7e70f0ed | 3375 | struct ceph_mds_client *mdsc = s->s_mdsc; |
2f2dc053 | 3376 | |
7e70f0ed SW |
3377 | pr_warning("mds%d closed our session\n", s->s_mds); |
3378 | send_mds_reconnect(mdsc, s); | |
2f2dc053 SW |
3379 | } |
3380 | ||
3381 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
3382 | { | |
3383 | struct ceph_mds_session *s = con->private; | |
3384 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3385 | int type = le16_to_cpu(msg->hdr.type); | |
3386 | ||
2600d2dd SW |
3387 | mutex_lock(&mdsc->mutex); |
3388 | if (__verify_registered_session(mdsc, s) < 0) { | |
3389 | mutex_unlock(&mdsc->mutex); | |
3390 | goto out; | |
3391 | } | |
3392 | mutex_unlock(&mdsc->mutex); | |
3393 | ||
2f2dc053 SW |
3394 | switch (type) { |
3395 | case CEPH_MSG_MDS_MAP: | |
3396 | ceph_mdsc_handle_map(mdsc, msg); | |
3397 | break; | |
3398 | case CEPH_MSG_CLIENT_SESSION: | |
3399 | handle_session(s, msg); | |
3400 | break; | |
3401 | case CEPH_MSG_CLIENT_REPLY: | |
3402 | handle_reply(s, msg); | |
3403 | break; | |
3404 | case CEPH_MSG_CLIENT_REQUEST_FORWARD: | |
2600d2dd | 3405 | handle_forward(mdsc, s, msg); |
2f2dc053 SW |
3406 | break; |
3407 | case CEPH_MSG_CLIENT_CAPS: | |
3408 | ceph_handle_caps(s, msg); | |
3409 | break; | |
3410 | case CEPH_MSG_CLIENT_SNAP: | |
2600d2dd | 3411 | ceph_handle_snap(mdsc, s, msg); |
2f2dc053 SW |
3412 | break; |
3413 | case CEPH_MSG_CLIENT_LEASE: | |
2600d2dd | 3414 | handle_lease(mdsc, s, msg); |
2f2dc053 SW |
3415 | break; |
3416 | ||
3417 | default: | |
3418 | pr_err("received unknown message type %d %s\n", type, | |
3419 | ceph_msg_type_name(type)); | |
3420 | } | |
2600d2dd | 3421 | out: |
2f2dc053 SW |
3422 | ceph_msg_put(msg); |
3423 | } | |
3424 | ||
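dispatch() first revalidates, under mdsc->mutex, that the session is still registered before acting on the message, since the connection callback can race with session teardown; only then does it fan out by message type. A compact sketch of that guard-then-switch shape; the types and the registration check are illustrative stand-ins.

/* Sketch: validate the session, then dispatch by message type. */
#include <stdbool.h>
#include <stdio.h>

struct session { int id; bool registered; };

static bool verify_registered(struct session *s)
{
	/* the real code checks mdsc->sessions[] under mdsc->mutex */
	return s->registered;
}

static void dispatch(struct session *s, int type)
{
	if (!verify_registered(s)) {
		printf("dropping msg %d for stale session %d\n", type, s->id);
		return;		/* the real code still puts the msg ref */
	}
	switch (type) {
	case 1:  printf("session msg\n"); break;
	case 2:  printf("reply msg\n");   break;
	default: printf("unknown type %d\n", type);
	}
}

int main(void)
{
	struct session s = { 0, true };
	dispatch(&s, 2);
	s.registered = false;
	dispatch(&s, 1);
	return 0;
}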
4e7a5dcd SW |
3425 | /* |
3426 | * authentication | |
3427 | */ | |
a3530df3 AE |
3428 | |
3429 | /* | |
3430 | * Note: returned pointer is the address of a structure that's | |
3431 | * managed separately. Caller must *not* attempt to free it. | |
3432 | */ | |
3433 | static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |
8f43fb53 | 3434 | int *proto, int force_new) |
4e7a5dcd SW |
3435 | { |
3436 | struct ceph_mds_session *s = con->private; | |
3437 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 3438 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
74f1869f | 3439 | struct ceph_auth_handshake *auth = &s->s_auth; |
4e7a5dcd | 3440 | |
74f1869f | 3441 | if (force_new && auth->authorizer) { |
27859f97 | 3442 | ceph_auth_destroy_authorizer(ac, auth->authorizer); |
74f1869f | 3443 | auth->authorizer = NULL; |
4e7a5dcd | 3444 | } |
27859f97 SW |
3445 | if (!auth->authorizer) { |
3446 | int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
3447 | auth); | |
0bed9b5c SW |
3448 | if (ret) |
3449 | return ERR_PTR(ret); | |
27859f97 SW |
3450 | } else { |
3451 | int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
3452 | auth); | |
a255651d | 3453 | if (ret) |
a3530df3 | 3454 | return ERR_PTR(ret); |
4e7a5dcd | 3455 | } |
4e7a5dcd | 3456 | *proto = ac->protocol; |
74f1869f | 3457 | |
a3530df3 | 3458 | return auth; |
4e7a5dcd SW |
3459 | } |
3460 | ||
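get_authorizer() implements a small lifecycle: destroy the authorizer when the peer rejected it (force_new), create it lazily on first use, and otherwise let the auth layer refresh the one already held. A hedged userspace sketch of that create/refresh/renew flow; the generation counter and all names are purely illustrative.

/* Sketch: lazy-create / refresh / force-renew lifecycle. */
#include <stdio.h>
#include <stdlib.h>

struct authorizer { int generation; };

static struct authorizer *get_auth(struct authorizer **slot, int force_new)
{
	static int gen;

	if (force_new && *slot) {	/* peer rejected it: throw away */
		free(*slot);
		*slot = NULL;
	}
	if (!*slot) {			/* first use: create */
		*slot = calloc(1, sizeof(**slot));
		if (!*slot)
			return NULL;
		(*slot)->generation = ++gen;
	} else {			/* reuse: refresh in place */
		(*slot)->generation = ++gen;
	}
	return *slot;
}

int main(void)
{
	struct authorizer *a = NULL;
	get_auth(&a, 0);		/* created */
	get_auth(&a, 0);		/* refreshed */
	get_auth(&a, 1);		/* destroyed and recreated */
	printf("generation %d\n", a ? a->generation : -1);
	free(a);
	return 0;
}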
3461 | ||
3462 | static int verify_authorizer_reply(struct ceph_connection *con, int len) | |
3463 | { | |
3464 | struct ceph_mds_session *s = con->private; | |
3465 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 3466 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
4e7a5dcd | 3467 | |
27859f97 | 3468 | return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len); |
4e7a5dcd SW |
3469 | } |
3470 | ||
9bd2e6f8 SW |
3471 | static int invalidate_authorizer(struct ceph_connection *con) |
3472 | { | |
3473 | struct ceph_mds_session *s = con->private; | |
3474 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 3475 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
9bd2e6f8 | 3476 | |
27859f97 | 3477 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
9bd2e6f8 | 3478 | |
3d14c5d2 | 3479 | return ceph_monc_validate_auth(&mdsc->fsc->client->monc); |
9bd2e6f8 SW |
3480 | } |
3481 | ||
53ded495 AE |
3482 | static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, |
3483 | struct ceph_msg_header *hdr, int *skip) | |
3484 | { | |
3485 | struct ceph_msg *msg; | |
3486 | int type = (int) le16_to_cpu(hdr->type); | |
3487 | int front_len = (int) le32_to_cpu(hdr->front_len); | |
3488 | ||
3489 | if (con->in_msg) | |
3490 | return con->in_msg; | |
3491 | ||
3492 | *skip = 0; | |
3493 | msg = ceph_msg_new(type, front_len, GFP_NOFS, false); | |
3494 | if (!msg) { | |
3495 | pr_err("unable to allocate msg type %d len %d\n", | |
3496 | type, front_len); | |
3497 | return NULL; | |
3498 | } | |
53ded495 AE |
3499 | |
3500 | return msg; | |
3501 | } | |
3502 | ||
9e32789f | 3503 | static const struct ceph_connection_operations mds_con_ops = { |
2f2dc053 SW |
3504 | .get = con_get, |
3505 | .put = con_put, | |
3506 | .dispatch = dispatch, | |
4e7a5dcd SW |
3507 | .get_authorizer = get_authorizer, |
3508 | .verify_authorizer_reply = verify_authorizer_reply, | |
9bd2e6f8 | 3509 | .invalidate_authorizer = invalidate_authorizer, |
2f2dc053 | 3510 | .peer_reset = peer_reset, |
53ded495 | 3511 | .alloc_msg = mds_alloc_msg, |
2f2dc053 SW |
3512 | }; |
3513 | ||
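mds_con_ops above is the messenger's callback vtable: a const struct of function pointers stored on each connection so the generic transport code can call back into MDS-specific handlers. A minimal sketch of the same pattern; the types and names below are invented for illustration.

/* Sketch: a const ops vtable bound to each connection. */
#include <stdio.h>

struct conn;

struct conn_ops {
	void (*dispatch)(struct conn *c, const char *msg);
	void (*peer_reset)(struct conn *c);
};

struct conn {
	const struct conn_ops *ops;
	void *private;		/* the session, in the real code */
};

static void my_dispatch(struct conn *c, const char *msg)
{
	(void)c;
	printf("dispatch: %s\n", msg);
}

static void my_peer_reset(struct conn *c)
{
	(void)c;
	printf("peer reset\n");
}

static const struct conn_ops my_ops = {
	.dispatch   = my_dispatch,
	.peer_reset = my_peer_reset,
};

int main(void)
{
	struct conn c = { &my_ops, NULL };
	c.ops->dispatch(&c, "hello");
	c.ops->peer_reset(&c);
	return 0;
}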
2f2dc053 | 3514 | /* eof */ |