Commit | Line | Data |
---|---|---|
3d14c5d2 | 1 | #include <linux/ceph/ceph_debug.h> |
2f2dc053 | 2 | |
496e5955 | 3 | #include <linux/fs.h> |
2f2dc053 | 4 | #include <linux/wait.h> |
5a0e3ad6 | 5 | #include <linux/slab.h> |
54008399 | 6 | #include <linux/gfp.h> |
2f2dc053 | 7 | #include <linux/sched.h> |
3d14c5d2 YS |
8 | #include <linux/debugfs.h> |
9 | #include <linux/seq_file.h> | |
dbd0c8bf | 10 | #include <linux/utsname.h> |
3e0708b9 | 11 | #include <linux/ratelimit.h> |
2f2dc053 | 12 | |
2f2dc053 | 13 | #include "super.h" |
3d14c5d2 YS |
14 | #include "mds_client.h" |
15 | ||
1fe60e51 | 16 | #include <linux/ceph/ceph_features.h> |
3d14c5d2 YS |
17 | #include <linux/ceph/messenger.h> |
18 | #include <linux/ceph/decode.h> | |
19 | #include <linux/ceph/pagelist.h> | |
20 | #include <linux/ceph/auth.h> | |
21 | #include <linux/ceph/debugfs.h> | |
2f2dc053 SW |
22 | |
23 | /* | |
24 | * A cluster of MDS (metadata server) daemons is responsible for | |
25 | * managing the file system namespace (the directory hierarchy and | |
26 | * inodes) and for coordinating shared access to storage. Metadata is | |
27 | * partitioned hierarchically across a number of servers, and that |
28 | * partition varies over time as the cluster adjusts the distribution | |
29 | * in order to balance load. | |
30 | * | |
31 | * The MDS client is primarily responsible for managing synchronous |
32 | * metadata requests for operations like open, unlink, and so forth. | |
33 | * If there is an MDS failure, we find out about it when we (possibly |
34 | * request and) receive a new MDS map, and can resubmit affected | |
35 | * requests. | |
36 | * | |
37 | * For the most part, though, we take advantage of a lossless | |
38 | * communications channel to the MDS, and do not need to worry about | |
39 | * timing out or resubmitting requests. | |
40 | * | |
41 | * We maintain a stateful "session" with each MDS we interact with. | |
42 | * Within each session, we send periodic heartbeat messages to ensure |
43 | * any capabilities or leases we have been issued remain valid. If |
44 | * the session times out and goes stale, our leases and capabilities | |
45 | * are no longer valid. | |
46 | */ | |
47 | ||
20cb34ae | 48 | struct ceph_reconnect_state { |
44c99757 | 49 | int nr_caps; |
20cb34ae | 50 | struct ceph_pagelist *pagelist; |
121f22a1 | 51 | unsigned msg_version; |
20cb34ae SW |
52 | }; |
53 | ||
2f2dc053 SW |
54 | static void __wake_requests(struct ceph_mds_client *mdsc, |
55 | struct list_head *head); | |
56 | ||
9e32789f | 57 | static const struct ceph_connection_operations mds_con_ops; |
2f2dc053 SW |
58 | |
59 | ||
60 | /* | |
61 | * mds reply parsing | |
62 | */ | |
63 | ||
64 | /* | |
65 | * parse individual inode info | |
66 | */ | |
67 | static int parse_reply_info_in(void **p, void *end, | |
14303d20 | 68 | struct ceph_mds_reply_info_in *info, |
12b4629a | 69 | u64 features) |
2f2dc053 SW |
70 | { |
71 | int err = -EIO; | |
72 | ||
73 | info->in = *p; | |
74 | *p += sizeof(struct ceph_mds_reply_inode) + | |
75 | sizeof(*info->in->fragtree.splits) * | |
76 | le32_to_cpu(info->in->fragtree.nsplits); | |
77 | ||
78 | ceph_decode_32_safe(p, end, info->symlink_len, bad); | |
79 | ceph_decode_need(p, end, info->symlink_len, bad); | |
80 | info->symlink = *p; | |
81 | *p += info->symlink_len; | |
82 | ||
14303d20 SW |
83 | if (features & CEPH_FEATURE_DIRLAYOUTHASH) |
84 | ceph_decode_copy_safe(p, end, &info->dir_layout, | |
85 | sizeof(info->dir_layout), bad); | |
86 | else | |
87 | memset(&info->dir_layout, 0, sizeof(info->dir_layout)); | |
88 | ||
2f2dc053 SW |
89 | ceph_decode_32_safe(p, end, info->xattr_len, bad); |
90 | ceph_decode_need(p, end, info->xattr_len, bad); | |
91 | info->xattr_data = *p; | |
92 | *p += info->xattr_len; | |
fb01d1f8 YZ |
93 | |
94 | if (features & CEPH_FEATURE_MDS_INLINE_DATA) { | |
95 | ceph_decode_64_safe(p, end, info->inline_version, bad); | |
96 | ceph_decode_32_safe(p, end, info->inline_len, bad); | |
97 | ceph_decode_need(p, end, info->inline_len, bad); | |
98 | info->inline_data = *p; | |
99 | *p += info->inline_len; | |
100 | } else | |
101 | info->inline_version = CEPH_INLINE_NONE; | |
102 | ||
779fe0fb YZ |
103 | info->pool_ns_len = 0; |
104 | info->pool_ns_data = NULL; | |
5ea5c5e0 YZ |
105 | if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) { |
106 | ceph_decode_32_safe(p, end, info->pool_ns_len, bad); | |
779fe0fb YZ |
107 | if (info->pool_ns_len > 0) { |
108 | ceph_decode_need(p, end, info->pool_ns_len, bad); | |
109 | info->pool_ns_data = *p; | |
110 | *p += info->pool_ns_len; | |
111 | } | |
5ea5c5e0 YZ |
112 | } |
113 | ||
2f2dc053 SW |
114 | return 0; |
115 | bad: | |
116 | return err; | |
117 | } | |
118 | ||
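The parsers in this file lean on the `ceph_decode_32_safe()` / `ceph_decode_need()` helpers from `<linux/ceph/decode.h>`: each verifies that enough bytes remain between `*p` and `end` before reading, advances the cursor, and jumps to the local `bad` label on truncation. A minimal sketch of that pattern, with a hypothetical `demo_decode_blob()` standing in for the real macros:

```c
/*
 * Bounds-checked decode of a length-prefixed blob, in the style of
 * parse_reply_info_in(). Illustrative only: the real helpers are
 * macros in <linux/ceph/decode.h> and also handle unaligned reads.
 */
static int demo_decode_blob(void **p, void *end, void **data, u32 *len)
{
	/* like ceph_decode_32_safe(): need 4 bytes for the length */
	if (end < *p || (size_t)(end - *p) < sizeof(u32))
		goto bad;
	*len = le32_to_cpu(*(__le32 *)*p);
	*p += sizeof(u32);

	/* like ceph_decode_need(): the payload itself must also fit */
	if ((size_t)(end - *p) < *len)
		goto bad;
	*data = *p;	/* borrow a pointer into the message buffer */
	*p += *len;	/* advance the cursor past the payload */
	return 0;
bad:
	return -EIO;	/* truncated or corrupt reply */
}
```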
119 | /* | |
120 | * parse a normal reply, which may contain a (dir+)dentry and/or a | |
121 | * target inode. | |
122 | */ | |
123 | static int parse_reply_info_trace(void **p, void *end, | |
14303d20 | 124 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 125 | u64 features) |
2f2dc053 SW |
126 | { |
127 | int err; | |
128 | ||
129 | if (info->head->is_dentry) { | |
14303d20 | 130 | err = parse_reply_info_in(p, end, &info->diri, features); |
2f2dc053 SW |
131 | if (err < 0) |
132 | goto out_bad; | |
133 | ||
134 | if (unlikely(*p + sizeof(*info->dirfrag) > end)) | |
135 | goto bad; | |
136 | info->dirfrag = *p; | |
137 | *p += sizeof(*info->dirfrag) + | |
138 | sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); | |
139 | if (unlikely(*p > end)) | |
140 | goto bad; | |
141 | ||
142 | ceph_decode_32_safe(p, end, info->dname_len, bad); | |
143 | ceph_decode_need(p, end, info->dname_len, bad); | |
144 | info->dname = *p; | |
145 | *p += info->dname_len; | |
146 | info->dlease = *p; | |
147 | *p += sizeof(*info->dlease); | |
148 | } | |
149 | ||
150 | if (info->head->is_target) { | |
14303d20 | 151 | err = parse_reply_info_in(p, end, &info->targeti, features); |
2f2dc053 SW |
152 | if (err < 0) |
153 | goto out_bad; | |
154 | } | |
155 | ||
156 | if (unlikely(*p != end)) | |
157 | goto bad; | |
158 | return 0; | |
159 | ||
160 | bad: | |
161 | err = -EIO; | |
162 | out_bad: | |
163 | pr_err("problem parsing mds trace %d\n", err); | |
164 | return err; | |
165 | } | |
166 | ||
167 | /* | |
168 | * parse readdir results | |
169 | */ | |
170 | static int parse_reply_info_dir(void **p, void *end, | |
14303d20 | 171 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 172 | u64 features) |
2f2dc053 SW |
173 | { |
174 | u32 num, i = 0; | |
175 | int err; | |
176 | ||
177 | info->dir_dir = *p; | |
178 | if (*p + sizeof(*info->dir_dir) > end) | |
179 | goto bad; | |
180 | *p += sizeof(*info->dir_dir) + | |
181 | sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); | |
182 | if (*p > end) | |
183 | goto bad; | |
184 | ||
185 | ceph_decode_need(p, end, sizeof(num) + 2, bad); | |
c89136ea | 186 | num = ceph_decode_32(p); |
956d39d6 YZ |
187 | { |
188 | u16 flags = ceph_decode_16(p); | |
189 | info->dir_end = !!(flags & CEPH_READDIR_FRAG_END); | |
190 | info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE); | |
f3c4ebe6 | 191 | info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER); |
79162547 | 192 | info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH); |
956d39d6 | 193 | } |
2f2dc053 SW |
194 | if (num == 0) |
195 | goto done; | |
196 | ||
2a5beea3 YZ |
197 | BUG_ON(!info->dir_entries); |
198 | if ((unsigned long)(info->dir_entries + num) > | |
199 | (unsigned long)info->dir_entries + info->dir_buf_size) { | |
54008399 YZ |
200 | pr_err("dir contents are larger than expected\n"); |
201 | WARN_ON(1); | |
202 | goto bad; | |
203 | } | |
2f2dc053 | 204 | |
54008399 | 205 | info->dir_nr = num; |
2f2dc053 | 206 | while (num) { |
2a5beea3 | 207 | struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i; |
2f2dc053 SW |
208 | /* dentry */ |
209 | ceph_decode_need(p, end, sizeof(u32)*2, bad); | |
2a5beea3 YZ |
210 | rde->name_len = ceph_decode_32(p); |
211 | ceph_decode_need(p, end, rde->name_len, bad); | |
212 | rde->name = *p; | |
213 | *p += rde->name_len; | |
214 | dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name); | |
215 | rde->lease = *p; | |
2f2dc053 SW |
216 | *p += sizeof(struct ceph_mds_reply_lease); |
217 | ||
218 | /* inode */ | |
2a5beea3 | 219 | err = parse_reply_info_in(p, end, &rde->inode, features); |
2f2dc053 SW |
220 | if (err < 0) |
221 | goto out_bad; | |
8974eebd YZ |
222 | /* ceph_readdir_prepopulate() will update it */ |
223 | rde->offset = 0; | |
2f2dc053 SW |
224 | i++; |
225 | num--; | |
226 | } | |
227 | ||
228 | done: | |
229 | if (*p != end) | |
230 | goto bad; | |
231 | return 0; | |
232 | ||
233 | bad: | |
234 | err = -EIO; | |
235 | out_bad: | |
236 | pr_err("problem parsing dir contents %d\n", err); | |
237 | return err; | |
238 | } | |
239 | ||
25933abd HS |
240 | /* |
241 | * parse fcntl F_GETLK results | |
242 | */ | |
243 | static int parse_reply_info_filelock(void **p, void *end, | |
14303d20 | 244 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 245 | u64 features) |
25933abd HS |
246 | { |
247 | if (*p + sizeof(*info->filelock_reply) > end) | |
248 | goto bad; | |
249 | ||
250 | info->filelock_reply = *p; | |
251 | *p += sizeof(*info->filelock_reply); | |
252 | ||
253 | if (unlikely(*p != end)) | |
254 | goto bad; | |
255 | return 0; | |
256 | ||
257 | bad: | |
258 | return -EIO; | |
259 | } | |
260 | ||
6e8575fa SL |
261 | /* |
262 | * parse create results | |
263 | */ | |
264 | static int parse_reply_info_create(void **p, void *end, | |
265 | struct ceph_mds_reply_info_parsed *info, | |
12b4629a | 266 | u64 features) |
6e8575fa SL |
267 | { |
268 | if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { | |
269 | if (*p == end) { | |
270 | info->has_create_ino = false; | |
271 | } else { | |
272 | info->has_create_ino = true; | |
273 | info->ino = ceph_decode_64(p); | |
274 | } | |
275 | } | |
276 | ||
277 | if (unlikely(*p != end)) | |
278 | goto bad; | |
279 | return 0; | |
280 | ||
281 | bad: | |
282 | return -EIO; | |
283 | } | |
284 | ||
25933abd HS |
285 | /* |
286 | * parse extra results | |
287 | */ | |
288 | static int parse_reply_info_extra(void **p, void *end, | |
14303d20 | 289 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 290 | u64 features) |
25933abd | 291 | { |
6df8c9d8 JL |
292 | u32 op = le32_to_cpu(info->head->op); |
293 | ||
294 | if (op == CEPH_MDS_OP_GETFILELOCK) | |
14303d20 | 295 | return parse_reply_info_filelock(p, end, info, features); |
6df8c9d8 | 296 | else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) |
14303d20 | 297 | return parse_reply_info_dir(p, end, info, features); |
6df8c9d8 | 298 | else if (op == CEPH_MDS_OP_CREATE) |
6e8575fa SL |
299 | return parse_reply_info_create(p, end, info, features); |
300 | else | |
301 | return -EIO; | |
25933abd HS |
302 | } |
303 | ||
2f2dc053 SW |
304 | /* |
305 | * parse entire mds reply | |
306 | */ | |
307 | static int parse_reply_info(struct ceph_msg *msg, | |
14303d20 | 308 | struct ceph_mds_reply_info_parsed *info, |
12b4629a | 309 | u64 features) |
2f2dc053 SW |
310 | { |
311 | void *p, *end; | |
312 | u32 len; | |
313 | int err; | |
314 | ||
315 | info->head = msg->front.iov_base; | |
316 | p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); | |
317 | end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); | |
318 | ||
319 | /* trace */ | |
320 | ceph_decode_32_safe(&p, end, len, bad); | |
321 | if (len > 0) { | |
32852a81 | 322 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 323 | err = parse_reply_info_trace(&p, p+len, info, features); |
2f2dc053 SW |
324 | if (err < 0) |
325 | goto out_bad; | |
326 | } | |
327 | ||
25933abd | 328 | /* extra */ |
2f2dc053 SW |
329 | ceph_decode_32_safe(&p, end, len, bad); |
330 | if (len > 0) { | |
32852a81 | 331 | ceph_decode_need(&p, end, len, bad); |
14303d20 | 332 | err = parse_reply_info_extra(&p, p+len, info, features); |
2f2dc053 SW |
333 | if (err < 0) |
334 | goto out_bad; | |
335 | } | |
336 | ||
337 | /* snap blob */ | |
338 | ceph_decode_32_safe(&p, end, len, bad); | |
339 | info->snapblob_len = len; | |
340 | info->snapblob = p; | |
341 | p += len; | |
342 | ||
343 | if (p != end) | |
344 | goto bad; | |
345 | return 0; | |
346 | ||
347 | bad: | |
348 | err = -EIO; | |
349 | out_bad: | |
350 | pr_err("mds parse_reply err %d\n", err); | |
351 | return err; | |
352 | } | |
353 | ||
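Taken together, the parsers walk a fixed head followed by three length-prefixed sections. The front-buffer layout, as implied by `parse_reply_info()` above:

```c
/*
 * Layout of msg->front as consumed by parse_reply_info():
 *
 *   struct ceph_mds_reply_head head;
 *   u32 trace_len;    u8 trace[trace_len];       -> parse_reply_info_trace()
 *   u32 extra_len;    u8 extra[extra_len];       -> parse_reply_info_extra()
 *   u32 snapblob_len; u8 snapblob[snapblob_len]; -> kept as a raw blob
 *
 * The "extra" section is dispatched on head->op (filelock, readdir,
 * or create results); any bytes left after the snap blob are -EIO.
 */
```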
354 | static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) | |
355 | { | |
2a5beea3 | 356 | if (!info->dir_entries) |
54008399 | 357 | return; |
2a5beea3 | 358 | free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size)); |
2f2dc053 SW |
359 | } |
360 | ||
361 | ||
362 | /* | |
363 | * sessions | |
364 | */ | |
a687ecaf | 365 | const char *ceph_session_state_name(int s) |
2f2dc053 SW |
366 | { |
367 | switch (s) { | |
368 | case CEPH_MDS_SESSION_NEW: return "new"; | |
369 | case CEPH_MDS_SESSION_OPENING: return "opening"; | |
370 | case CEPH_MDS_SESSION_OPEN: return "open"; | |
371 | case CEPH_MDS_SESSION_HUNG: return "hung"; | |
372 | case CEPH_MDS_SESSION_CLOSING: return "closing"; | |
44ca18f2 | 373 | case CEPH_MDS_SESSION_RESTARTING: return "restarting"; |
2f2dc053 | 374 | case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; |
fcff415c | 375 | case CEPH_MDS_SESSION_REJECTED: return "rejected"; |
2f2dc053 SW |
376 | default: return "???"; |
377 | } | |
378 | } | |
379 | ||
380 | static struct ceph_mds_session *get_session(struct ceph_mds_session *s) | |
381 | { | |
3997c01d | 382 | if (refcount_inc_not_zero(&s->s_ref)) { |
2f2dc053 | 383 | dout("mdsc get_session %p %d -> %d\n", s, |
3997c01d | 384 | refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref)); |
2f2dc053 SW |
385 | return s; |
386 | } else { | |
387 | dout("mdsc get_session %p 0 -- FAIL", s); | |
388 | return NULL; | |
389 | } | |
390 | } | |
391 | ||
392 | void ceph_put_mds_session(struct ceph_mds_session *s) | |
393 | { | |
394 | dout("mdsc put_session %p %d -> %d\n", s, | |
3997c01d ER |
395 | refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1); |
396 | if (refcount_dec_and_test(&s->s_ref)) { | |
6c4a1915 | 397 | if (s->s_auth.authorizer) |
6c1ea260 | 398 | ceph_auth_destroy_authorizer(s->s_auth.authorizer); |
2f2dc053 | 399 | kfree(s); |
4e7a5dcd | 400 | } |
2f2dc053 SW |
401 | } |
402 | ||
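`get_session()` and `ceph_put_mds_session()` form the usual lookup-versus-free race guard: `refcount_inc_not_zero()` refuses to take a reference once the count has already hit zero, so a lookup racing with the final put fails cleanly instead of resurrecting a session mid-teardown. The idiom in generic form (hypothetical `demo_obj` names, assuming `<linux/refcount.h>` and `<linux/slab.h>`):

```c
struct demo_obj {
	refcount_t ref;
};

/* lookup side: succeeds only while at least one reference remains */
static struct demo_obj *demo_get(struct demo_obj *o)
{
	if (o && refcount_inc_not_zero(&o->ref))
		return o;
	return NULL;	/* object is being freed; must not be handed out */
}

/* release side: the last put frees the object */
static void demo_put(struct demo_obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);
}
```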
403 | /* | |
404 | * called under mdsc->mutex | |
405 | */ | |
406 | struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, | |
407 | int mds) | |
408 | { | |
409 | struct ceph_mds_session *session; | |
410 | ||
411 | if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) | |
412 | return NULL; | |
413 | session = mdsc->sessions[mds]; | |
414 | dout("lookup_mds_session %p %d\n", session, | |
3997c01d | 415 | refcount_read(&session->s_ref)); |
2f2dc053 SW |
416 | get_session(session); |
417 | return session; | |
418 | } | |
419 | ||
420 | static bool __have_session(struct ceph_mds_client *mdsc, int mds) | |
421 | { | |
422 | if (mds >= mdsc->max_sessions) | |
423 | return false; | |
424 | return mdsc->sessions[mds]; | |
425 | } | |
426 | ||
2600d2dd SW |
427 | static int __verify_registered_session(struct ceph_mds_client *mdsc, |
428 | struct ceph_mds_session *s) | |
429 | { | |
430 | if (s->s_mds >= mdsc->max_sessions || | |
431 | mdsc->sessions[s->s_mds] != s) | |
432 | return -ENOENT; | |
433 | return 0; | |
434 | } | |
435 | ||
2f2dc053 SW |
436 | /* |
437 | * create+register a new session for given mds. | |
438 | * called under mdsc->mutex. | |
439 | */ | |
440 | static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, | |
441 | int mds) | |
442 | { | |
443 | struct ceph_mds_session *s; | |
444 | ||
76201b63 | 445 | if (mds >= mdsc->mdsmap->m_num_mds) |
c338c07c NY |
446 | return ERR_PTR(-EINVAL); |
447 | ||
2f2dc053 | 448 | s = kzalloc(sizeof(*s), GFP_NOFS); |
4736b009 DC |
449 | if (!s) |
450 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
451 | s->s_mdsc = mdsc; |
452 | s->s_mds = mds; | |
453 | s->s_state = CEPH_MDS_SESSION_NEW; | |
454 | s->s_ttl = 0; | |
455 | s->s_seq = 0; | |
456 | mutex_init(&s->s_mutex); | |
457 | ||
b7a9e5dd | 458 | ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); |
2f2dc053 | 459 | |
d8fb02ab | 460 | spin_lock_init(&s->s_gen_ttl_lock); |
2f2dc053 | 461 | s->s_cap_gen = 0; |
1ce208a6 | 462 | s->s_cap_ttl = jiffies - 1; |
d8fb02ab AE |
463 | |
464 | spin_lock_init(&s->s_cap_lock); | |
2f2dc053 SW |
465 | s->s_renew_requested = 0; |
466 | s->s_renew_seq = 0; | |
467 | INIT_LIST_HEAD(&s->s_caps); | |
468 | s->s_nr_caps = 0; | |
5dacf091 | 469 | s->s_trim_caps = 0; |
3997c01d | 470 | refcount_set(&s->s_ref, 1); |
2f2dc053 SW |
471 | INIT_LIST_HEAD(&s->s_waiting); |
472 | INIT_LIST_HEAD(&s->s_unsafe); | |
473 | s->s_num_cap_releases = 0; | |
99a9c273 | 474 | s->s_cap_reconnect = 0; |
7c1332b8 | 475 | s->s_cap_iterator = NULL; |
2f2dc053 | 476 | INIT_LIST_HEAD(&s->s_cap_releases); |
2f2dc053 | 477 | INIT_LIST_HEAD(&s->s_cap_flushing); |
2f2dc053 SW |
478 | |
479 | dout("register_session mds%d\n", mds); | |
480 | if (mds >= mdsc->max_sessions) { | |
481 | int newmax = 1 << get_count_order(mds+1); | |
482 | struct ceph_mds_session **sa; | |
483 | ||
484 | dout("register_session realloc to %d\n", newmax); | |
485 | sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); | |
486 | if (sa == NULL) | |
42ce56e5 | 487 | goto fail_realloc; |
2f2dc053 SW |
488 | if (mdsc->sessions) { |
489 | memcpy(sa, mdsc->sessions, | |
490 | mdsc->max_sessions * sizeof(void *)); | |
491 | kfree(mdsc->sessions); | |
492 | } | |
493 | mdsc->sessions = sa; | |
494 | mdsc->max_sessions = newmax; | |
495 | } | |
496 | mdsc->sessions[mds] = s; | |
86d8f67b | 497 | atomic_inc(&mdsc->num_sessions); |
3997c01d | 498 | refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */ |
42ce56e5 | 499 | |
b7a9e5dd SW |
500 | ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, |
501 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
42ce56e5 | 502 | |
2f2dc053 | 503 | return s; |
42ce56e5 SW |
504 | |
505 | fail_realloc: | |
506 | kfree(s); | |
507 | return ERR_PTR(-ENOMEM); | |
2f2dc053 SW |
508 | } |
509 | ||
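Note how `register_session()` grows the `sessions` array: `1 << get_count_order(mds + 1)` rounds the required length up to the next power of two, so the `kcalloc()`-and-copy reallocation happens O(log n) times as ranks are registered, not once per new mds. Worked values, assuming `get_count_order(n)` computes `ceil(log2(n))` as in `<linux/bitops.h>`:

```c
/*
 *   mds = 0  ->  newmax = 1 << get_count_order(1) = 1
 *   mds = 1  ->  newmax = 1 << get_count_order(2) = 2
 *   mds = 2  ->  newmax = 1 << get_count_order(3) = 4
 *   mds = 5  ->  newmax = 1 << get_count_order(6) = 8
 */
```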
510 | /* | |
511 | * called under mdsc->mutex | |
512 | */ | |
2600d2dd | 513 | static void __unregister_session(struct ceph_mds_client *mdsc, |
42ce56e5 | 514 | struct ceph_mds_session *s) |
2f2dc053 | 515 | { |
2600d2dd SW |
516 | dout("__unregister_session mds%d %p\n", s->s_mds, s); |
517 | BUG_ON(mdsc->sessions[s->s_mds] != s); | |
42ce56e5 SW |
518 | mdsc->sessions[s->s_mds] = NULL; |
519 | ceph_con_close(&s->s_con); | |
520 | ceph_put_mds_session(s); | |
86d8f67b | 521 | atomic_dec(&mdsc->num_sessions); |
2f2dc053 SW |
522 | } |
523 | ||
524 | /* | |
525 | * drop session refs in request. | |
526 | * | |
527 | * should be last request ref, or hold mdsc->mutex | |
528 | */ | |
529 | static void put_request_session(struct ceph_mds_request *req) | |
530 | { | |
531 | if (req->r_session) { | |
532 | ceph_put_mds_session(req->r_session); | |
533 | req->r_session = NULL; | |
534 | } | |
535 | } | |
536 | ||
153c8e6b | 537 | void ceph_mdsc_release_request(struct kref *kref) |
2f2dc053 | 538 | { |
153c8e6b SW |
539 | struct ceph_mds_request *req = container_of(kref, |
540 | struct ceph_mds_request, | |
541 | r_kref); | |
54008399 | 542 | destroy_reply_info(&req->r_reply_info); |
153c8e6b SW |
543 | if (req->r_request) |
544 | ceph_msg_put(req->r_request); | |
54008399 | 545 | if (req->r_reply) |
153c8e6b | 546 | ceph_msg_put(req->r_reply); |
153c8e6b | 547 | if (req->r_inode) { |
41b02e1f | 548 | ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); |
153c8e6b SW |
549 | iput(req->r_inode); |
550 | } | |
3dd69aab JL |
551 | if (req->r_parent) |
552 | ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); | |
e96a650a | 553 | iput(req->r_target_inode); |
153c8e6b SW |
554 | if (req->r_dentry) |
555 | dput(req->r_dentry); | |
844d87c3 SW |
556 | if (req->r_old_dentry) |
557 | dput(req->r_old_dentry); | |
558 | if (req->r_old_dentry_dir) { | |
41b02e1f SW |
559 | /* |
560 | * track (and drop pins for) r_old_dentry_dir | |
561 | * separately, since r_old_dentry's d_parent may have | |
562 | * changed between the dir mutex being dropped and | |
563 | * this request being freed. | |
564 | */ | |
565 | ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), | |
566 | CEPH_CAP_PIN); | |
41b02e1f | 567 | iput(req->r_old_dentry_dir); |
2f2dc053 | 568 | } |
153c8e6b SW |
569 | kfree(req->r_path1); |
570 | kfree(req->r_path2); | |
25e6bae3 YZ |
571 | if (req->r_pagelist) |
572 | ceph_pagelist_release(req->r_pagelist); | |
153c8e6b | 573 | put_request_session(req); |
37151668 | 574 | ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); |
153c8e6b | 575 | kfree(req); |
2f2dc053 SW |
576 | } |
577 | ||
fcd00b68 ID |
578 | DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node) |
579 | ||
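`DEFINE_RB_FUNCS(request, ...)` is a ceph helper macro that stamps out the `insert_request()`, `erase_request()` and `lookup_request()` functions used below, all operating on an `rb_root` keyed by `r_tid`. Roughly what the generated lookup does (a sketch, not the literal macro expansion):

```c
/* Approximate shape of the lookup_request() emitted by the macro. */
static struct ceph_mds_request *
demo_lookup_request(struct rb_root *root, u64 tid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_mds_request *req =
			rb_entry(n, struct ceph_mds_request, r_node);

		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else
			return req;	/* exact tid match */
	}
	return NULL;
}
```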
2f2dc053 SW |
580 | /* |
581 | * lookup request, bump ref if found. |
582 | * | |
583 | * called under mdsc->mutex. | |
584 | */ | |
fcd00b68 ID |
585 | static struct ceph_mds_request * |
586 | lookup_get_request(struct ceph_mds_client *mdsc, u64 tid) | |
2f2dc053 SW |
587 | { |
588 | struct ceph_mds_request *req; | |
44ca18f2 | 589 | |
fcd00b68 ID |
590 | req = lookup_request(&mdsc->request_tree, tid); |
591 | if (req) | |
592 | ceph_mdsc_get_request(req); | |
44ca18f2 | 593 | |
fcd00b68 | 594 | return req; |
2f2dc053 SW |
595 | } |
596 | ||
597 | /* | |
598 | * Register an in-flight request, and assign a tid. Link to directory | |
599 | * we are modifying (if any). |
600 | * | |
601 | * Called under mdsc->mutex. | |
602 | */ | |
603 | static void __register_request(struct ceph_mds_client *mdsc, | |
604 | struct ceph_mds_request *req, | |
605 | struct inode *dir) | |
606 | { | |
607 | req->r_tid = ++mdsc->last_tid; | |
608 | if (req->r_num_caps) | |
37151668 YS |
609 | ceph_reserve_caps(mdsc, &req->r_caps_reservation, |
610 | req->r_num_caps); | |
2f2dc053 SW |
611 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
612 | ceph_mdsc_get_request(req); | |
fcd00b68 | 613 | insert_request(&mdsc->request_tree, req); |
2f2dc053 | 614 | |
cb4276cc SW |
615 | req->r_uid = current_fsuid(); |
616 | req->r_gid = current_fsgid(); | |
617 | ||
e8a7b8b1 YZ |
618 | if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) |
619 | mdsc->oldest_tid = req->r_tid; | |
620 | ||
2f2dc053 | 621 | if (dir) { |
3b663780 | 622 | ihold(dir); |
2f2dc053 | 623 | req->r_unsafe_dir = dir; |
2f2dc053 SW |
624 | } |
625 | } | |
626 | ||
627 | static void __unregister_request(struct ceph_mds_client *mdsc, | |
628 | struct ceph_mds_request *req) | |
629 | { | |
630 | dout("__unregister_request %p tid %lld\n", req, req->r_tid); | |
e8a7b8b1 | 631 | |
df963ea8 JL |
632 | /* Never leave an unregistered request on an unsafe list! */ |
633 | list_del_init(&req->r_unsafe_item); | |
634 | ||
e8a7b8b1 YZ |
635 | if (req->r_tid == mdsc->oldest_tid) { |
636 | struct rb_node *p = rb_next(&req->r_node); | |
637 | mdsc->oldest_tid = 0; | |
638 | while (p) { | |
639 | struct ceph_mds_request *next_req = | |
640 | rb_entry(p, struct ceph_mds_request, r_node); | |
641 | if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { | |
642 | mdsc->oldest_tid = next_req->r_tid; | |
643 | break; | |
644 | } | |
645 | p = rb_next(p); | |
646 | } | |
647 | } | |
648 | ||
fcd00b68 | 649 | erase_request(&mdsc->request_tree, req); |
2f2dc053 | 650 | |
bc2de10d JL |
651 | if (req->r_unsafe_dir && |
652 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
2f2dc053 | 653 | struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); |
2f2dc053 SW |
654 | spin_lock(&ci->i_unsafe_lock); |
655 | list_del_init(&req->r_unsafe_dir_item); | |
656 | spin_unlock(&ci->i_unsafe_lock); | |
4c06ace8 | 657 | } |
bc2de10d JL |
658 | if (req->r_target_inode && |
659 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
68cd5b4b YZ |
660 | struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); |
661 | spin_lock(&ci->i_unsafe_lock); | |
662 | list_del_init(&req->r_unsafe_target_item); | |
663 | spin_unlock(&ci->i_unsafe_lock); | |
664 | } | |
3b663780 | 665 | |
4c06ace8 | 666 | if (req->r_unsafe_dir) { |
3b663780 SW |
667 | iput(req->r_unsafe_dir); |
668 | req->r_unsafe_dir = NULL; | |
2f2dc053 | 669 | } |
94aa8ae1 | 670 | |
fc55d2c9 YZ |
671 | complete_all(&req->r_safe_completion); |
672 | ||
94aa8ae1 | 673 | ceph_mdsc_put_request(req); |
2f2dc053 SW |
674 | } |
675 | ||
30c71233 JL |
676 | /* |
677 | * Walk back up the dentry tree until we hit a dentry representing a | |
678 | * non-snapshot inode. We do this using the rcu_read_lock (which must be held | |
679 | * when calling this) to ensure that the objects won't disappear while we're | |
680 | * working with them. Once we hit a candidate dentry, we attempt to take a | |
681 | * reference to it, and return that as the result. | |
682 | */ | |
f1075480 DC |
683 | static struct inode *get_nonsnap_parent(struct dentry *dentry) |
684 | { | |
685 | struct inode *inode = NULL; | |
30c71233 JL |
686 | |
687 | while (dentry && !IS_ROOT(dentry)) { | |
688 | inode = d_inode_rcu(dentry); | |
689 | if (!inode || ceph_snap(inode) == CEPH_NOSNAP) | |
690 | break; | |
691 | dentry = dentry->d_parent; | |
692 | } | |
693 | if (inode) | |
694 | inode = igrab(inode); | |
695 | return inode; | |
696 | } | |
697 | ||
2f2dc053 SW |
698 | /* |
699 | * Choose mds to send request to next. If there is a hint set in the | |
700 | * request (e.g., due to a prior forward hint from the mds), use that. | |
701 | * Otherwise, consult frag tree and/or caps to identify the | |
702 | * appropriate mds. If all else fails, choose randomly. | |
703 | * | |
704 | * Called under mdsc->mutex. | |
705 | */ | |
706 | static int __choose_mds(struct ceph_mds_client *mdsc, | |
707 | struct ceph_mds_request *req) | |
708 | { | |
709 | struct inode *inode; | |
710 | struct ceph_inode_info *ci; | |
711 | struct ceph_cap *cap; | |
712 | int mode = req->r_direct_mode; | |
713 | int mds = -1; | |
714 | u32 hash = req->r_direct_hash; | |
bc2de10d | 715 | bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); |
2f2dc053 SW |
716 | |
717 | /* | |
718 | * is there a specific mds we should try? ignore hint if we have | |
719 | * no session and the mds is not up (active or recovering). | |
720 | */ | |
721 | if (req->r_resend_mds >= 0 && | |
722 | (__have_session(mdsc, req->r_resend_mds) || | |
723 | ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { | |
724 | dout("choose_mds using resend_mds mds%d\n", | |
725 | req->r_resend_mds); | |
726 | return req->r_resend_mds; | |
727 | } | |
728 | ||
729 | if (mode == USE_RANDOM_MDS) | |
730 | goto random; | |
731 | ||
732 | inode = NULL; | |
733 | if (req->r_inode) { | |
734 | inode = req->r_inode; | |
30c71233 | 735 | ihold(inode); |
2f2dc053 | 736 | } else if (req->r_dentry) { |
d79698da | 737 | /* ignore race with rename; old or new d_parent is okay */ |
30c71233 JL |
738 | struct dentry *parent; |
739 | struct inode *dir; | |
740 | ||
741 | rcu_read_lock(); | |
742 | parent = req->r_dentry->d_parent; | |
3dd69aab | 743 | dir = req->r_parent ? : d_inode_rcu(parent); |
eb6bb1c5 | 744 | |
30c71233 JL |
745 | if (!dir || dir->i_sb != mdsc->fsc->sb) { |
746 | /* not this fs or parent went negative */ | |
2b0143b5 | 747 | inode = d_inode(req->r_dentry); |
30c71233 JL |
748 | if (inode) |
749 | ihold(inode); | |
eb6bb1c5 SW |
750 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { |
751 | /* direct snapped/virtual snapdir requests | |
752 | * based on parent dir inode */ | |
30c71233 | 753 | inode = get_nonsnap_parent(parent); |
eb6bb1c5 | 754 | dout("__choose_mds using nonsnap parent %p\n", inode); |
ca18bede | 755 | } else { |
eb6bb1c5 | 756 | /* dentry target */ |
2b0143b5 | 757 | inode = d_inode(req->r_dentry); |
ca18bede YZ |
758 | if (!inode || mode == USE_AUTH_MDS) { |
759 | /* dir + name */ | |
30c71233 | 760 | inode = igrab(dir); |
ca18bede YZ |
761 | hash = ceph_dentry_hash(dir, req->r_dentry); |
762 | is_hash = true; | |
30c71233 JL |
763 | } else { |
764 | ihold(inode); | |
ca18bede | 765 | } |
2f2dc053 | 766 | } |
30c71233 | 767 | rcu_read_unlock(); |
2f2dc053 | 768 | } |
eb6bb1c5 | 769 | |
2f2dc053 SW |
770 | dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, |
771 | (int)hash, mode); | |
772 | if (!inode) | |
773 | goto random; | |
774 | ci = ceph_inode(inode); | |
775 | ||
776 | if (is_hash && S_ISDIR(inode->i_mode)) { | |
777 | struct ceph_inode_frag frag; | |
778 | int found; | |
779 | ||
780 | ceph_choose_frag(ci, hash, &frag, &found); | |
781 | if (found) { | |
782 | if (mode == USE_ANY_MDS && frag.ndist > 0) { | |
783 | u8 r; | |
784 | ||
785 | /* choose a random replica */ | |
786 | get_random_bytes(&r, 1); | |
787 | r %= frag.ndist; | |
788 | mds = frag.dist[r]; | |
789 | dout("choose_mds %p %llx.%llx " | |
790 | "frag %u mds%d (%d/%d)\n", | |
791 | inode, ceph_vinop(inode), | |
d66bbd44 | 792 | frag.frag, mds, |
2f2dc053 | 793 | (int)r, frag.ndist); |
d66bbd44 SW |
794 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
795 | CEPH_MDS_STATE_ACTIVE) | |
30c71233 | 796 | goto out; |
2f2dc053 SW |
797 | } |
798 | ||
799 | /* since this file/dir wasn't known to be | |
800 | * replicated, then we want to look for the | |
801 | * authoritative mds. */ | |
802 | mode = USE_AUTH_MDS; | |
803 | if (frag.mds >= 0) { | |
804 | /* choose auth mds */ | |
805 | mds = frag.mds; | |
806 | dout("choose_mds %p %llx.%llx " | |
807 | "frag %u mds%d (auth)\n", | |
808 | inode, ceph_vinop(inode), frag.frag, mds); | |
d66bbd44 SW |
809 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
810 | CEPH_MDS_STATE_ACTIVE) | |
30c71233 | 811 | goto out; |
2f2dc053 SW |
812 | } |
813 | } | |
814 | } | |
815 | ||
be655596 | 816 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
817 | cap = NULL; |
818 | if (mode == USE_AUTH_MDS) | |
819 | cap = ci->i_auth_cap; | |
820 | if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) | |
821 | cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); | |
822 | if (!cap) { | |
be655596 | 823 | spin_unlock(&ci->i_ceph_lock); |
30c71233 | 824 | iput(inode); |
2f2dc053 SW |
825 | goto random; |
826 | } | |
827 | mds = cap->session->s_mds; | |
828 | dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", | |
829 | inode, ceph_vinop(inode), mds, | |
830 | cap == ci->i_auth_cap ? "auth " : "", cap); | |
be655596 | 831 | spin_unlock(&ci->i_ceph_lock); |
30c71233 JL |
832 | out: |
833 | iput(inode); | |
2f2dc053 SW |
834 | return mds; |
835 | ||
836 | random: | |
837 | mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); | |
838 | dout("choose_mds chose random mds%d\n", mds); | |
839 | return mds; | |
840 | } | |
841 | ||
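The selection logic in `__choose_mds()` is easiest to read as an ordered list of fallbacks:

```c
/*
 * __choose_mds() decision order, summarized:
 *
 *  1. usable r_resend_mds hint              -> use the hinted mds
 *  2. mode == USE_RANDOM_MDS                -> pick randomly
 *  3. hashed dirfrag w/ replicas (ANY mode) -> random active replica
 *  4. dirfrag with a known auth mds         -> frag.mds, if active
 *  5. a cap on the inode (auth cap if
 *     mode == USE_AUTH_MDS)                 -> cap->session->s_mds
 *  6. otherwise                             -> ceph_mdsmap_get_random_mds()
 */
```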
842 | ||
843 | /* | |
844 | * session messages | |
845 | */ | |
846 | static struct ceph_msg *create_session_msg(u32 op, u64 seq) | |
847 | { | |
848 | struct ceph_msg *msg; | |
849 | struct ceph_mds_session_head *h; | |
850 | ||
b61c2763 SW |
851 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, |
852 | false); | |
a79832f2 | 853 | if (!msg) { |
2f2dc053 | 854 | pr_err("create_session_msg ENOMEM creating msg\n"); |
a79832f2 | 855 | return NULL; |
2f2dc053 SW |
856 | } |
857 | h = msg->front.iov_base; | |
858 | h->op = cpu_to_le32(op); | |
859 | h->seq = cpu_to_le64(seq); | |
dbd0c8bf JS |
860 | |
861 | return msg; | |
862 | } | |
863 | ||
864 | /* | |
865 | * session message, specialization for CEPH_SESSION_REQUEST_OPEN | |
866 | * to include additional client metadata fields. | |
867 | */ | |
868 | static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) | |
869 | { | |
870 | struct ceph_msg *msg; | |
871 | struct ceph_mds_session_head *h; | |
872 | int i = -1; | |
873 | int metadata_bytes = 0; | |
874 | int metadata_key_count = 0; | |
875 | struct ceph_options *opt = mdsc->fsc->client->options; | |
3f384954 | 876 | struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; |
dbd0c8bf JS |
877 | void *p; |
878 | ||
a6a5ce4f | 879 | const char* metadata[][2] = { |
dbd0c8bf | 880 | {"hostname", utsname()->nodename}, |
a6a5ce4f | 881 | {"kernel_version", utsname()->release}, |
3f384954 YZ |
882 | {"entity_id", opt->name ? : ""}, |
883 | {"root", fsopt->server_path ? : "/"}, | |
dbd0c8bf JS |
884 | {NULL, NULL} |
885 | }; | |
886 | ||
887 | /* Calculate serialized length of metadata */ | |
888 | metadata_bytes = 4; /* map length */ | |
889 | for (i = 0; metadata[i][0] != NULL; ++i) { | |
890 | metadata_bytes += 8 + strlen(metadata[i][0]) + | |
891 | strlen(metadata[i][1]); | |
892 | metadata_key_count++; | |
893 | } | |
894 | ||
895 | /* Allocate the message */ | |
896 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes, | |
897 | GFP_NOFS, false); | |
898 | if (!msg) { | |
899 | pr_err("create_session_msg ENOMEM creating msg\n"); | |
900 | return NULL; | |
901 | } | |
902 | h = msg->front.iov_base; | |
903 | h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN); | |
904 | h->seq = cpu_to_le64(seq); | |
905 | ||
906 | /* | |
907 | * Serialize client metadata into waiting buffer space, using | |
908 | * the format that userspace expects for map<string, string> | |
7cfa0313 JS |
909 | * |
910 | * ClientSession messages with metadata are v2 | |
dbd0c8bf | 911 | */ |
7cfa0313 JS |
912 | msg->hdr.version = cpu_to_le16(2); |
913 | msg->hdr.compat_version = cpu_to_le16(1); | |
dbd0c8bf JS |
914 | |
915 | /* The write pointer, following the session_head structure */ | |
916 | p = msg->front.iov_base + sizeof(*h); | |
917 | ||
918 | /* Number of entries in the map */ | |
919 | ceph_encode_32(&p, metadata_key_count); | |
920 | ||
921 | /* Two length-prefixed strings for each entry in the map */ | |
922 | for (i = 0; metadata[i][0] != NULL; ++i) { | |
923 | size_t const key_len = strlen(metadata[i][0]); | |
924 | size_t const val_len = strlen(metadata[i][1]); | |
925 | ||
926 | ceph_encode_32(&p, key_len); | |
927 | memcpy(p, metadata[i][0], key_len); | |
928 | p += key_len; | |
929 | ceph_encode_32(&p, val_len); | |
930 | memcpy(p, metadata[i][1], val_len); | |
931 | p += val_len; | |
932 | } | |
933 | ||
2f2dc053 SW |
934 | return msg; |
935 | } | |
936 | ||
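The serialization loop above emits the client metadata in the format userspace expects for a `map<string, string>`: a `u32` entry count, then two length-prefixed byte strings per entry. Schematically:

```c
/*
 * Wire format appended after ceph_mds_session_head:
 *
 *   u32 count;                          // metadata_key_count
 *   repeated count times:
 *     u32 key_len;  u8 key[key_len];
 *     u32 val_len;  u8 val[val_len];
 *
 * Hence metadata_bytes adds 8 (two u32 lengths) plus both string
 * lengths for each entry.
 */
```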
937 | /* | |
938 | * send session open request. | |
939 | * | |
940 | * called under mdsc->mutex | |
941 | */ | |
942 | static int __open_session(struct ceph_mds_client *mdsc, | |
943 | struct ceph_mds_session *session) | |
944 | { | |
945 | struct ceph_msg *msg; | |
946 | int mstate; | |
947 | int mds = session->s_mds; | |
2f2dc053 SW |
948 | |
949 | /* wait for mds to go active? */ | |
950 | mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); | |
951 | dout("open_session to mds%d (%s)\n", mds, | |
952 | ceph_mds_state_name(mstate)); | |
953 | session->s_state = CEPH_MDS_SESSION_OPENING; | |
954 | session->s_renew_requested = jiffies; | |
955 | ||
956 | /* send connect message */ | |
dbd0c8bf | 957 | msg = create_session_open_msg(mdsc, session->s_seq); |
a79832f2 SW |
958 | if (!msg) |
959 | return -ENOMEM; | |
2f2dc053 | 960 | ceph_con_send(&session->s_con, msg); |
2f2dc053 SW |
961 | return 0; |
962 | } | |
963 | ||
ed0552a1 SW |
964 | /* |
965 | * open sessions for any export targets for the given mds | |
966 | * | |
967 | * called under mdsc->mutex | |
968 | */ | |
5d72d13c YZ |
969 | static struct ceph_mds_session * |
970 | __open_export_target_session(struct ceph_mds_client *mdsc, int target) | |
971 | { | |
972 | struct ceph_mds_session *session; | |
973 | ||
974 | session = __ceph_lookup_mds_session(mdsc, target); | |
975 | if (!session) { | |
976 | session = register_session(mdsc, target); | |
977 | if (IS_ERR(session)) | |
978 | return session; | |
979 | } | |
980 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
981 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
982 | __open_session(mdsc, session); | |
983 | ||
984 | return session; | |
985 | } | |
986 | ||
987 | struct ceph_mds_session * | |
988 | ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) | |
989 | { | |
990 | struct ceph_mds_session *session; | |
991 | ||
992 | dout("open_export_target_session to mds%d\n", target); | |
993 | ||
994 | mutex_lock(&mdsc->mutex); | |
995 | session = __open_export_target_session(mdsc, target); | |
996 | mutex_unlock(&mdsc->mutex); | |
997 | ||
998 | return session; | |
999 | } | |
1000 | ||
ed0552a1 SW |
1001 | static void __open_export_target_sessions(struct ceph_mds_client *mdsc, |
1002 | struct ceph_mds_session *session) | |
1003 | { | |
1004 | struct ceph_mds_info *mi; | |
1005 | struct ceph_mds_session *ts; | |
1006 | int i, mds = session->s_mds; | |
ed0552a1 | 1007 | |
76201b63 | 1008 | if (mds >= mdsc->mdsmap->m_num_mds) |
ed0552a1 | 1009 | return; |
5d72d13c | 1010 | |
ed0552a1 SW |
1011 | mi = &mdsc->mdsmap->m_info[mds]; |
1012 | dout("open_export_target_sessions for mds%d (%d targets)\n", | |
1013 | session->s_mds, mi->num_export_targets); | |
1014 | ||
1015 | for (i = 0; i < mi->num_export_targets; i++) { | |
5d72d13c YZ |
1016 | ts = __open_export_target_session(mdsc, mi->export_targets[i]); |
1017 | if (!IS_ERR(ts)) | |
1018 | ceph_put_mds_session(ts); | |
ed0552a1 SW |
1019 | } |
1020 | } | |
1021 | ||
154f42c2 SW |
1022 | void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, |
1023 | struct ceph_mds_session *session) | |
1024 | { | |
1025 | mutex_lock(&mdsc->mutex); | |
1026 | __open_export_target_sessions(mdsc, session); | |
1027 | mutex_unlock(&mdsc->mutex); | |
1028 | } | |
1029 | ||
2f2dc053 SW |
1030 | /* |
1031 | * session caps | |
1032 | */ | |
1033 | ||
745a8e3b YZ |
1034 | /* caller holds s_cap_lock, we drop it */ |
1035 | static void cleanup_cap_releases(struct ceph_mds_client *mdsc, | |
1036 | struct ceph_mds_session *session) | |
1037 | __releases(session->s_cap_lock) | |
2f2dc053 | 1038 | { |
745a8e3b YZ |
1039 | LIST_HEAD(tmp_list); |
1040 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1041 | session->s_num_cap_releases = 0; | |
1042 | spin_unlock(&session->s_cap_lock); | |
2f2dc053 | 1043 | |
745a8e3b YZ |
1044 | dout("cleanup_cap_releases mds%d\n", session->s_mds); |
1045 | while (!list_empty(&tmp_list)) { | |
1046 | struct ceph_cap *cap; | |
1047 | /* zero out the in-progress message */ | |
1048 | cap = list_first_entry(&tmp_list, | |
1049 | struct ceph_cap, session_caps); | |
1050 | list_del(&cap->session_caps); | |
1051 | ceph_put_cap(mdsc, cap); | |
2f2dc053 | 1052 | } |
2f2dc053 SW |
1053 | } |
1054 | ||
1c841a96 YZ |
1055 | static void cleanup_session_requests(struct ceph_mds_client *mdsc, |
1056 | struct ceph_mds_session *session) | |
1057 | { | |
1058 | struct ceph_mds_request *req; | |
1059 | struct rb_node *p; | |
1060 | ||
1061 | dout("cleanup_session_requests mds%d\n", session->s_mds); | |
1062 | mutex_lock(&mdsc->mutex); | |
1063 | while (!list_empty(&session->s_unsafe)) { | |
1064 | req = list_first_entry(&session->s_unsafe, | |
1065 | struct ceph_mds_request, r_unsafe_item); | |
3e0708b9 YZ |
1066 | pr_warn_ratelimited(" dropping unsafe request %llu\n", |
1067 | req->r_tid); | |
1c841a96 YZ |
1068 | __unregister_request(mdsc, req); |
1069 | } | |
1070 | /* zero r_attempts, so kick_requests() will re-send requests */ | |
1071 | p = rb_first(&mdsc->request_tree); | |
1072 | while (p) { | |
1073 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
1074 | p = rb_next(p); | |
1075 | if (req->r_session && | |
1076 | req->r_session->s_mds == session->s_mds) | |
1077 | req->r_attempts = 0; | |
1078 | } | |
1079 | mutex_unlock(&mdsc->mutex); | |
1080 | } | |
1081 | ||
2f2dc053 | 1082 | /* |
f818a736 SW |
1083 | * Helper to safely iterate over all caps associated with a session, with |
1084 | * special care taken to handle a racing __ceph_remove_cap(). | |
2f2dc053 | 1085 | * |
f818a736 | 1086 | * Caller must hold session s_mutex. |
2f2dc053 SW |
1087 | */ |
1088 | static int iterate_session_caps(struct ceph_mds_session *session, | |
1089 | int (*cb)(struct inode *, struct ceph_cap *, | |
1090 | void *), void *arg) | |
1091 | { | |
7c1332b8 SW |
1092 | struct list_head *p; |
1093 | struct ceph_cap *cap; | |
1094 | struct inode *inode, *last_inode = NULL; | |
1095 | struct ceph_cap *old_cap = NULL; | |
2f2dc053 SW |
1096 | int ret; |
1097 | ||
1098 | dout("iterate_session_caps %p mds%d\n", session, session->s_mds); | |
1099 | spin_lock(&session->s_cap_lock); | |
7c1332b8 SW |
1100 | p = session->s_caps.next; |
1101 | while (p != &session->s_caps) { | |
1102 | cap = list_entry(p, struct ceph_cap, session_caps); | |
2f2dc053 | 1103 | inode = igrab(&cap->ci->vfs_inode); |
7c1332b8 SW |
1104 | if (!inode) { |
1105 | p = p->next; | |
2f2dc053 | 1106 | continue; |
7c1332b8 SW |
1107 | } |
1108 | session->s_cap_iterator = cap; | |
2f2dc053 | 1109 | spin_unlock(&session->s_cap_lock); |
7c1332b8 SW |
1110 | |
1111 | if (last_inode) { | |
1112 | iput(last_inode); | |
1113 | last_inode = NULL; | |
1114 | } | |
1115 | if (old_cap) { | |
37151668 | 1116 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 SW |
1117 | old_cap = NULL; |
1118 | } | |
1119 | ||
2f2dc053 | 1120 | ret = cb(inode, cap, arg); |
7c1332b8 SW |
1121 | last_inode = inode; |
1122 | ||
2f2dc053 | 1123 | spin_lock(&session->s_cap_lock); |
7c1332b8 SW |
1124 | p = p->next; |
1125 | if (cap->ci == NULL) { | |
1126 | dout("iterate_session_caps finishing cap %p removal\n", | |
1127 | cap); | |
1128 | BUG_ON(cap->session != session); | |
745a8e3b | 1129 | cap->session = NULL; |
7c1332b8 SW |
1130 | list_del_init(&cap->session_caps); |
1131 | session->s_nr_caps--; | |
745a8e3b YZ |
1132 | if (cap->queue_release) { |
1133 | list_add_tail(&cap->session_caps, | |
1134 | &session->s_cap_releases); | |
1135 | session->s_num_cap_releases++; | |
1136 | } else { | |
1137 | old_cap = cap; /* put_cap it w/o locks held */ | |
1138 | } | |
7c1332b8 | 1139 | } |
5dacf091 SW |
1140 | if (ret < 0) |
1141 | goto out; | |
2f2dc053 | 1142 | } |
5dacf091 SW |
1143 | ret = 0; |
1144 | out: | |
7c1332b8 | 1145 | session->s_cap_iterator = NULL; |
2f2dc053 | 1146 | spin_unlock(&session->s_cap_lock); |
7c1332b8 | 1147 | |
e96a650a | 1148 | iput(last_inode); |
7c1332b8 | 1149 | if (old_cap) |
37151668 | 1150 | ceph_put_cap(session->s_mdsc, old_cap); |
7c1332b8 | 1151 | |
5dacf091 | 1152 | return ret; |
2f2dc053 SW |
1153 | } |
1154 | ||
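The lock choreography above deserves a summary: each cap's inode is pinned with `igrab()` and `s_cap_iterator` marks the current position before `s_cap_lock` is dropped, so the callback may sleep; a concurrent `__ceph_remove_cap()` that hits the marked cap only clears `cap->ci` and leaves the actual unlinking to the iterator once it retakes the lock. Reduced to a skeleton:

```c
/*
 * Skeleton of iterate_session_caps() (bookkeeping elided):
 *
 *   spin_lock(&session->s_cap_lock);
 *   while (more caps on session->s_caps) {
 *           inode = igrab(&cap->ci->vfs_inode);  // pin the inode
 *           session->s_cap_iterator = cap;       // mark our position
 *           spin_unlock(&session->s_cap_lock);   // cb may sleep now
 *
 *           cb(inode, cap, arg);
 *
 *           spin_lock(&session->s_cap_lock);     // resume the walk
 *           if (!cap->ci)                        // removal raced with us;
 *                   finish unlinking the cap;    // we own the deferred work
 *   }
 *   session->s_cap_iterator = NULL;
 *   spin_unlock(&session->s_cap_lock);
 */
```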
1155 | static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | |
6c99f254 | 1156 | void *arg) |
2f2dc053 | 1157 | { |
6c93df5d | 1158 | struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg; |
2f2dc053 | 1159 | struct ceph_inode_info *ci = ceph_inode(inode); |
553adfd9 | 1160 | LIST_HEAD(to_remove); |
6c93df5d YZ |
1161 | bool drop = false; |
1162 | bool invalidate = false; | |
6c99f254 | 1163 | |
2f2dc053 SW |
1164 | dout("removing cap %p, ci is %p, inode is %p\n", |
1165 | cap, ci, &ci->vfs_inode); | |
be655596 | 1166 | spin_lock(&ci->i_ceph_lock); |
a096b09a | 1167 | __ceph_remove_cap(cap, false); |
571ade33 | 1168 | if (!ci->i_auth_cap) { |
553adfd9 | 1169 | struct ceph_cap_flush *cf; |
6c93df5d | 1170 | struct ceph_mds_client *mdsc = fsc->mdsc; |
6c99f254 | 1171 | |
77310320 YZ |
1172 | ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; |
1173 | ||
6c93df5d | 1174 | if (ci->i_wrbuffer_ref > 0 && |
52953d55 | 1175 | READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
6c93df5d YZ |
1176 | invalidate = true; |
1177 | ||
e4500b5e YZ |
1178 | while (!list_empty(&ci->i_cap_flush_list)) { |
1179 | cf = list_first_entry(&ci->i_cap_flush_list, | |
1180 | struct ceph_cap_flush, i_list); | |
8cdcc07d | 1181 | list_move(&cf->i_list, &to_remove); |
553adfd9 YZ |
1182 | } |
1183 | ||
6c99f254 | 1184 | spin_lock(&mdsc->cap_dirty_lock); |
8310b089 | 1185 | |
e4500b5e YZ |
1186 | list_for_each_entry(cf, &to_remove, i_list) |
1187 | list_del(&cf->g_list); | |
8310b089 | 1188 | |
6c99f254 | 1189 | if (!list_empty(&ci->i_dirty_item)) { |
3e0708b9 YZ |
1190 | pr_warn_ratelimited( |
1191 | " dropping dirty %s state for %p %lld\n", | |
6c99f254 SW |
1192 | ceph_cap_string(ci->i_dirty_caps), |
1193 | inode, ceph_ino(inode)); | |
1194 | ci->i_dirty_caps = 0; | |
1195 | list_del_init(&ci->i_dirty_item); | |
6c93df5d | 1196 | drop = true; |
6c99f254 SW |
1197 | } |
1198 | if (!list_empty(&ci->i_flushing_item)) { | |
3e0708b9 YZ |
1199 | pr_warn_ratelimited( |
1200 | " dropping dirty+flushing %s state for %p %lld\n", | |
6c99f254 SW |
1201 | ceph_cap_string(ci->i_flushing_caps), |
1202 | inode, ceph_ino(inode)); | |
1203 | ci->i_flushing_caps = 0; | |
1204 | list_del_init(&ci->i_flushing_item); | |
1205 | mdsc->num_cap_flushing--; | |
6c93df5d | 1206 | drop = true; |
6c99f254 | 1207 | } |
6c99f254 | 1208 | spin_unlock(&mdsc->cap_dirty_lock); |
553adfd9 | 1209 | |
f66fd9f0 | 1210 | if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { |
e4500b5e | 1211 | list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); |
f66fd9f0 YZ |
1212 | ci->i_prealloc_cap_flush = NULL; |
1213 | } | |
6c99f254 | 1214 | } |
be655596 | 1215 | spin_unlock(&ci->i_ceph_lock); |
553adfd9 YZ |
1216 | while (!list_empty(&to_remove)) { |
1217 | struct ceph_cap_flush *cf; | |
1218 | cf = list_first_entry(&to_remove, | |
e4500b5e YZ |
1219 | struct ceph_cap_flush, i_list); |
1220 | list_del(&cf->i_list); | |
f66fd9f0 | 1221 | ceph_free_cap_flush(cf); |
553adfd9 | 1222 | } |
77310320 YZ |
1223 | |
1224 | wake_up_all(&ci->i_cap_wq); | |
6c93df5d YZ |
1225 | if (invalidate) |
1226 | ceph_queue_invalidate(inode); | |
77310320 | 1227 | if (drop) |
6c99f254 | 1228 | iput(inode); |
2f2dc053 SW |
1229 | return 0; |
1230 | } | |
1231 | ||
1232 | /* | |
1233 | * caller must hold session s_mutex | |
1234 | */ | |
1235 | static void remove_session_caps(struct ceph_mds_session *session) | |
1236 | { | |
6c93df5d YZ |
1237 | struct ceph_fs_client *fsc = session->s_mdsc->fsc; |
1238 | struct super_block *sb = fsc->sb; | |
2f2dc053 | 1239 | dout("remove_session_caps on %p\n", session); |
6c93df5d | 1240 | iterate_session_caps(session, remove_session_caps_cb, fsc); |
6f60f889 | 1241 | |
c8799fc4 YZ |
1242 | wake_up_all(&fsc->mdsc->cap_flushing_wq); |
1243 | ||
6f60f889 YZ |
1244 | spin_lock(&session->s_cap_lock); |
1245 | if (session->s_nr_caps > 0) { | |
6f60f889 YZ |
1246 | struct inode *inode; |
1247 | struct ceph_cap *cap, *prev = NULL; | |
1248 | struct ceph_vino vino; | |
1249 | /* | |
1250 | * iterate_session_caps() skips inodes that are being | |
1251 | * deleted, we need to wait until deletions are complete. | |
1252 | * __wait_on_freeing_inode() is designed for the job, | |
1253 | * but it is not exported, so use lookup inode function | |
1254 | * to access it. | |
1255 | */ | |
1256 | while (!list_empty(&session->s_caps)) { | |
1257 | cap = list_entry(session->s_caps.next, | |
1258 | struct ceph_cap, session_caps); | |
1259 | if (cap == prev) | |
1260 | break; | |
1261 | prev = cap; | |
1262 | vino = cap->ci->i_vino; | |
1263 | spin_unlock(&session->s_cap_lock); | |
1264 | ||
ed284c49 | 1265 | inode = ceph_find_inode(sb, vino); |
6f60f889 YZ |
1266 | iput(inode); |
1267 | ||
1268 | spin_lock(&session->s_cap_lock); | |
1269 | } | |
1270 | } | |
745a8e3b YZ |
1271 | |
1272 | // drop cap expires and unlock s_cap_lock | |
1273 | cleanup_cap_releases(session->s_mdsc, session); | |
6f60f889 | 1274 | |
2f2dc053 | 1275 | BUG_ON(session->s_nr_caps > 0); |
6c99f254 | 1276 | BUG_ON(!list_empty(&session->s_cap_flushing)); |
2f2dc053 SW |
1277 | } |
1278 | ||
1279 | /* | |
1280 | * wake up any threads waiting on this session's caps. if the cap is | |
1281 | * old (didn't get renewed on the client reconnect), remove it now. | |
1282 | * | |
1283 | * caller must hold s_mutex. | |
1284 | */ | |
1285 | static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, | |
1286 | void *arg) | |
1287 | { | |
0dc2570f SW |
1288 | struct ceph_inode_info *ci = ceph_inode(inode); |
1289 | ||
0dc2570f | 1290 | if (arg) { |
be655596 | 1291 | spin_lock(&ci->i_ceph_lock); |
0dc2570f SW |
1292 | ci->i_wanted_max_size = 0; |
1293 | ci->i_requested_max_size = 0; | |
be655596 | 1294 | spin_unlock(&ci->i_ceph_lock); |
0dc2570f | 1295 | } |
e5360309 | 1296 | wake_up_all(&ci->i_cap_wq); |
2f2dc053 SW |
1297 | return 0; |
1298 | } | |
1299 | ||
0dc2570f SW |
1300 | static void wake_up_session_caps(struct ceph_mds_session *session, |
1301 | int reconnect) | |
2f2dc053 SW |
1302 | { |
1303 | dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); | |
0dc2570f SW |
1304 | iterate_session_caps(session, wake_up_session_cb, |
1305 | (void *)(unsigned long)reconnect); | |
2f2dc053 SW |
1306 | } |
1307 | ||
1308 | /* | |
1309 | * Send periodic message to MDS renewing all currently held caps. The | |
1310 | * ack will reset the expiration for all caps from this session. | |
1311 | * | |
1312 | * caller holds s_mutex | |
1313 | */ | |
1314 | static int send_renew_caps(struct ceph_mds_client *mdsc, | |
1315 | struct ceph_mds_session *session) | |
1316 | { | |
1317 | struct ceph_msg *msg; | |
1318 | int state; | |
1319 | ||
1320 | if (time_after_eq(jiffies, session->s_cap_ttl) && | |
1321 | time_after_eq(session->s_cap_ttl, session->s_renew_requested)) | |
1322 | pr_info("mds%d caps stale\n", session->s_mds); | |
e4cb4cb8 | 1323 | session->s_renew_requested = jiffies; |
2f2dc053 SW |
1324 | |
1325 | /* do not try to renew caps until a recovering mds has reconnected | |
1326 | * with its clients. */ | |
1327 | state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); | |
1328 | if (state < CEPH_MDS_STATE_RECONNECT) { | |
1329 | dout("send_renew_caps ignoring mds%d (%s)\n", | |
1330 | session->s_mds, ceph_mds_state_name(state)); | |
1331 | return 0; | |
1332 | } | |
1333 | ||
1334 | dout("send_renew_caps to mds%d (%s)\n", session->s_mds, | |
1335 | ceph_mds_state_name(state)); | |
2f2dc053 SW |
1336 | msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, |
1337 | ++session->s_renew_seq); | |
a79832f2 SW |
1338 | if (!msg) |
1339 | return -ENOMEM; | |
2f2dc053 SW |
1340 | ceph_con_send(&session->s_con, msg); |
1341 | return 0; | |
1342 | } | |
1343 | ||
186e4f7a YZ |
1344 | static int send_flushmsg_ack(struct ceph_mds_client *mdsc, |
1345 | struct ceph_mds_session *session, u64 seq) | |
1346 | { | |
1347 | struct ceph_msg *msg; | |
1348 | ||
1349 | dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", | |
a687ecaf | 1350 | session->s_mds, ceph_session_state_name(session->s_state), seq); |
186e4f7a YZ |
1351 | msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); |
1352 | if (!msg) | |
1353 | return -ENOMEM; | |
1354 | ceph_con_send(&session->s_con, msg); | |
1355 | return 0; | |
1356 | } | |
1357 | ||
1358 | ||
2f2dc053 SW |
1359 | /* |
1360 | * Note new cap ttl, and any transition from stale -> not stale (fresh?). | |
0dc2570f SW |
1361 | * |
1362 | * Called under session->s_mutex | |
2f2dc053 SW |
1363 | */ |
1364 | static void renewed_caps(struct ceph_mds_client *mdsc, | |
1365 | struct ceph_mds_session *session, int is_renew) | |
1366 | { | |
1367 | int was_stale; | |
1368 | int wake = 0; | |
1369 | ||
1370 | spin_lock(&session->s_cap_lock); | |
1ce208a6 | 1371 | was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl); |
2f2dc053 SW |
1372 | |
1373 | session->s_cap_ttl = session->s_renew_requested + | |
1374 | mdsc->mdsmap->m_session_timeout*HZ; | |
1375 | ||
1376 | if (was_stale) { | |
1377 | if (time_before(jiffies, session->s_cap_ttl)) { | |
1378 | pr_info("mds%d caps renewed\n", session->s_mds); | |
1379 | wake = 1; | |
1380 | } else { | |
1381 | pr_info("mds%d caps still stale\n", session->s_mds); | |
1382 | } | |
1383 | } | |
1384 | dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", | |
1385 | session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", | |
1386 | time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); | |
1387 | spin_unlock(&session->s_cap_lock); | |
1388 | ||
1389 | if (wake) | |
0dc2570f | 1390 | wake_up_session_caps(session, 0); |
2f2dc053 SW |
1391 | } |
1392 | ||
1393 | /* | |
1394 | * send a session close request | |
1395 | */ | |
1396 | static int request_close_session(struct ceph_mds_client *mdsc, | |
1397 | struct ceph_mds_session *session) | |
1398 | { | |
1399 | struct ceph_msg *msg; | |
2f2dc053 SW |
1400 | |
1401 | dout("request_close_session mds%d state %s seq %lld\n", | |
a687ecaf | 1402 | session->s_mds, ceph_session_state_name(session->s_state), |
2f2dc053 SW |
1403 | session->s_seq); |
1404 | msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); | |
a79832f2 SW |
1405 | if (!msg) |
1406 | return -ENOMEM; | |
1407 | ceph_con_send(&session->s_con, msg); | |
fcff415c | 1408 | return 1; |
2f2dc053 SW |
1409 | } |
1410 | ||
1411 | /* | |
1412 | * Called with s_mutex held. | |
1413 | */ | |
1414 | static int __close_session(struct ceph_mds_client *mdsc, | |
1415 | struct ceph_mds_session *session) | |
1416 | { | |
1417 | if (session->s_state >= CEPH_MDS_SESSION_CLOSING) | |
1418 | return 0; | |
1419 | session->s_state = CEPH_MDS_SESSION_CLOSING; | |
1420 | return request_close_session(mdsc, session); | |
1421 | } | |
1422 | ||
1423 | /* | |
1424 | * Trim old(er) caps. | |
1425 | * | |
1426 | * Because we can't cache an inode without one or more caps, we do | |
1427 | * this indirectly: if a cap is unused, we prune its aliases, at which | |
1428 | * point the inode will hopefully get dropped too. |
1429 | * | |
1430 | * Yes, this is a bit sloppy. Our only real goal here is to respond to | |
1431 | * memory pressure from the MDS, though, so it needn't be perfect. | |
1432 | */ | |
1433 | static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) | |
1434 | { | |
1435 | struct ceph_mds_session *session = arg; | |
1436 | struct ceph_inode_info *ci = ceph_inode(inode); | |
979abfdd | 1437 | int used, wanted, oissued, mine; |
2f2dc053 SW |
1438 | |
1439 | if (session->s_trim_caps <= 0) | |
1440 | return -1; | |
1441 | ||
be655596 | 1442 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
1443 | mine = cap->issued | cap->implemented; |
1444 | used = __ceph_caps_used(ci); | |
979abfdd | 1445 | wanted = __ceph_caps_file_wanted(ci); |
2f2dc053 SW |
1446 | oissued = __ceph_caps_issued_other(ci, cap); |
1447 | ||
979abfdd | 1448 | dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n", |
2f2dc053 | 1449 | inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), |
979abfdd YZ |
1450 | ceph_cap_string(used), ceph_cap_string(wanted)); |
1451 | if (cap == ci->i_auth_cap) { | |
622f3e25 YZ |
1452 | if (ci->i_dirty_caps || ci->i_flushing_caps || |
1453 | !list_empty(&ci->i_cap_snaps)) | |
979abfdd YZ |
1454 | goto out; |
1455 | if ((used | wanted) & CEPH_CAP_ANY_WR) | |
1456 | goto out; | |
1457 | } | |
5e804ac4 YZ |
1458 | /* The inode has cached pages, but it's no longer used. |
1459 | * we can safely drop it */ | |
1460 | if (wanted == 0 && used == CEPH_CAP_FILE_CACHE && | |
1461 | !(oissued & CEPH_CAP_FILE_CACHE)) { | |
1462 | used = 0; | |
1463 | oissued = 0; | |
1464 | } | |
979abfdd | 1465 | if ((used | wanted) & ~oissued & mine) |
2f2dc053 SW |
1466 | goto out; /* we need these caps */ |
1467 | ||
1468 | session->s_trim_caps--; | |
1469 | if (oissued) { | |
1470 | /* we aren't the only cap.. just remove us */ | |
a096b09a | 1471 | __ceph_remove_cap(cap, true); |
2f2dc053 | 1472 | } else { |
5e804ac4 | 1473 | /* try dropping referring dentries */ |
be655596 | 1474 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1475 | d_prune_aliases(inode); |
1476 | dout("trim_caps_cb %p cap %p pruned, count now %d\n", | |
1477 | inode, cap, atomic_read(&inode->i_count)); | |
1478 | return 0; | |
1479 | } | |
1480 | ||
1481 | out: | |
be655596 | 1482 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
1483 | return 0; |
1484 | } | |
1485 | ||
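The retention test at the heart of `trim_caps_cb()` packs the whole policy into one expression; reading it bit by bit:

```c
/*
 *   (used | wanted)   caps the inode is actively using or wants
 *   & ~oissued        ...that no *other* cap on the inode supplies
 *   & mine            ...but that this cap does supply
 *
 * If any bit survives, this cap is the sole provider of something
 * needed, so it must be kept.
 */
```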
1486 | /* | |
1487 | * Trim session cap count down to some max number. | |
1488 | */ | |
1489 | static int trim_caps(struct ceph_mds_client *mdsc, | |
1490 | struct ceph_mds_session *session, | |
1491 | int max_caps) | |
1492 | { | |
1493 | int trim_caps = session->s_nr_caps - max_caps; | |
1494 | ||
1495 | dout("trim_caps mds%d start: %d / %d, trim %d\n", | |
1496 | session->s_mds, session->s_nr_caps, max_caps, trim_caps); | |
1497 | if (trim_caps > 0) { | |
1498 | session->s_trim_caps = trim_caps; | |
1499 | iterate_session_caps(session, trim_caps_cb, session); | |
1500 | dout("trim_caps mds%d done: %d / %d, trimmed %d\n", | |
1501 | session->s_mds, session->s_nr_caps, max_caps, | |
1502 | trim_caps - session->s_trim_caps); | |
5dacf091 | 1503 | session->s_trim_caps = 0; |
2f2dc053 | 1504 | } |
a56371d9 | 1505 | |
a56371d9 | 1506 | ceph_send_cap_releases(mdsc, session); |
2f2dc053 SW |
1507 | return 0; |
1508 | } | |
1509 | ||
8310b089 YZ |
1510 | static int check_caps_flush(struct ceph_mds_client *mdsc, |
1511 | u64 want_flush_tid) | |
1512 | { | |
8310b089 YZ |
1513 | int ret = 1; |
1514 | ||
1515 | spin_lock(&mdsc->cap_dirty_lock); | |
e4500b5e YZ |
1516 | if (!list_empty(&mdsc->cap_flush_list)) { |
1517 | struct ceph_cap_flush *cf = | |
1518 | list_first_entry(&mdsc->cap_flush_list, | |
1519 | struct ceph_cap_flush, g_list); | |
1520 | if (cf->tid <= want_flush_tid) { | |
1521 | dout("check_caps_flush still flushing tid " | |
1522 | "%llu <= %llu\n", cf->tid, want_flush_tid); | |
1523 | ret = 0; | |
1524 | } | |
8310b089 YZ |
1525 | } |
1526 | spin_unlock(&mdsc->cap_dirty_lock); | |
1527 | return ret; | |
d3383a8e YZ |
1528 | } |
1529 | ||
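/*
 * check_caps_flush() only has to look at the first entry because
 * mdsc->cap_flush_list is kept in ascending tid order: if even the
 * oldest in-flight flush has a tid greater than want_flush_tid, then
 * everything up to and including want_flush_tid has already completed.
 */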
2f2dc053 SW |
1530 | /* |
1531 | * wait for all pending cap flushes to complete. | |
1532 | * | |
8310b089 | 1533 | * returns once we've flushed through want_flush_tid |
2f2dc053 | 1534 | */ |
affbc19a | 1535 | static void wait_caps_flush(struct ceph_mds_client *mdsc, |
0e294387 | 1536 | u64 want_flush_tid) |
2f2dc053 | 1537 | { |
0e294387 | 1538 | dout("wait_caps_flush want %llu\n", want_flush_tid); |
8310b089 YZ |
1539 | |
1540 | wait_event(mdsc->cap_flushing_wq, | |
1541 | check_caps_flush(mdsc, want_flush_tid)); | |
1542 | ||
1543 | dout("wait_caps_flush ok, flushed thru %llu\n", want_flush_tid); | |
2f2dc053 SW |
1544 | } |
1545 | ||
1546 | /* | |
1547 | * called under s_mutex | |
1548 | */ | |
3d7ded4d SW |
1549 | void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1550 | struct ceph_mds_session *session) | |
2f2dc053 | 1551 | { |
745a8e3b YZ |
1552 | struct ceph_msg *msg = NULL; |
1553 | struct ceph_mds_cap_release *head; | |
1554 | struct ceph_mds_cap_item *item; | |
92475f05 | 1555 | struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; |
745a8e3b YZ |
1556 | struct ceph_cap *cap; |
1557 | LIST_HEAD(tmp_list); | |
1558 | int num_cap_releases; | |
92475f05 JL |
1559 | __le32 barrier, *cap_barrier; |
1560 | ||
1561 | down_read(&osdc->lock); | |
1562 | barrier = cpu_to_le32(osdc->epoch_barrier); | |
1563 | up_read(&osdc->lock); | |
2f2dc053 | 1564 | |
0f8605f2 | 1565 | spin_lock(&session->s_cap_lock); |
745a8e3b YZ |
1566 | again: |
1567 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1568 | num_cap_releases = session->s_num_cap_releases; | |
1569 | session->s_num_cap_releases = 0; | |
2f2dc053 | 1570 | spin_unlock(&session->s_cap_lock); |
e01a5946 | 1571 | |
745a8e3b YZ |
1572 | while (!list_empty(&tmp_list)) { |
1573 | if (!msg) { | |
1574 | msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, | |
09cbfeaf | 1575 | PAGE_SIZE, GFP_NOFS, false); |
745a8e3b YZ |
1576 | if (!msg) |
1577 | goto out_err; | |
1578 | head = msg->front.iov_base; | |
1579 | head->num = cpu_to_le32(0); | |
1580 | msg->front.iov_len = sizeof(*head); | |
92475f05 JL |
1581 | |
1582 | msg->hdr.version = cpu_to_le16(2); | |
1583 | msg->hdr.compat_version = cpu_to_le16(1); | |
745a8e3b | 1584 | } |
92475f05 | 1585 | |
745a8e3b YZ |
1586 | cap = list_first_entry(&tmp_list, struct ceph_cap, |
1587 | session_caps); | |
1588 | list_del(&cap->session_caps); | |
1589 | num_cap_releases--; | |
e01a5946 | 1590 | |
00bd8edb | 1591 | head = msg->front.iov_base; |
745a8e3b YZ |
1592 | le32_add_cpu(&head->num, 1); |
1593 | item = msg->front.iov_base + msg->front.iov_len; | |
1594 | item->ino = cpu_to_le64(cap->cap_ino); | |
1595 | item->cap_id = cpu_to_le64(cap->cap_id); | |
1596 | item->migrate_seq = cpu_to_le32(cap->mseq); | |
1597 | item->seq = cpu_to_le32(cap->issue_seq); | |
1598 | msg->front.iov_len += sizeof(*item); | |
1599 | ||
1600 | ceph_put_cap(mdsc, cap); | |
1601 | ||
1602 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | |
92475f05 JL |
1603 | // Append cap_barrier field |
1604 | cap_barrier = msg->front.iov_base + msg->front.iov_len; | |
1605 | *cap_barrier = barrier; | |
1606 | msg->front.iov_len += sizeof(*cap_barrier); | |
1607 | ||
745a8e3b YZ |
1608 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
1609 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1610 | ceph_con_send(&session->s_con, msg); | |
1611 | msg = NULL; | |
1612 | } | |
00bd8edb | 1613 | } |
e01a5946 | 1614 | |
745a8e3b | 1615 | BUG_ON(num_cap_releases != 0); |
e01a5946 | 1616 | |
745a8e3b YZ |
1617 | spin_lock(&session->s_cap_lock); |
1618 | if (!list_empty(&session->s_cap_releases)) | |
1619 | goto again; | |
1620 | spin_unlock(&session->s_cap_lock); | |
1621 | ||
1622 | if (msg) { | |
92475f05 JL |
1623 | // Append cap_barrier field |
1624 | cap_barrier = msg->front.iov_base + msg->front.iov_len; | |
1625 | *cap_barrier = barrier; | |
1626 | msg->front.iov_len += sizeof(*cap_barrier); | |
1627 | ||
745a8e3b YZ |
1628 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
1629 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1630 | ceph_con_send(&session->s_con, msg); | |
e01a5946 | 1631 | } |
745a8e3b YZ |
1632 | return; |
1633 | out_err: | |
1634 | pr_err("send_cap_releases mds%d, failed to allocate message\n", | |
1635 | session->s_mds); | |
1636 | spin_lock(&session->s_cap_lock); | |
1637 | list_splice(&tmp_list, &session->s_cap_releases); | |
1638 | session->s_num_cap_releases += num_cap_releases; | |
1639 | spin_unlock(&session->s_cap_lock); | |
e01a5946 SW |
1640 | } |
1641 | ||
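/*
 * Wire format assembled by ceph_send_cap_releases(), as a sketch: each
 * CEPH_MSG_CLIENT_CAPRELEASE message carries
 *
 *	struct ceph_mds_cap_release head;   // head.num = item count
 *	struct ceph_mds_cap_item items[N];  // N <= CEPH_CAPS_PER_RELEASE
 *	__le32 cap_barrier;                 // osdc->epoch_barrier (v2 encoding)
 *
 * On message-allocation failure the pending caps are spliced back onto
 * session->s_cap_releases, so releases are delayed rather than lost.
 */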
2f2dc053 SW |
1642 | /* |
1643 | * requests | |
1644 | */ | |
1645 | ||
54008399 YZ |
1646 | int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, |
1647 | struct inode *dir) | |
1648 | { | |
1649 | struct ceph_inode_info *ci = ceph_inode(dir); | |
1650 | struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; | |
1651 | struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; | |
2a5beea3 | 1652 | size_t size = sizeof(struct ceph_mds_reply_dir_entry); |
54008399 YZ |
1653 | int order, num_entries; |
1654 | ||
1655 | spin_lock(&ci->i_ceph_lock); | |
1656 | num_entries = ci->i_files + ci->i_subdirs; | |
1657 | spin_unlock(&ci->i_ceph_lock); | |
1658 | num_entries = max(num_entries, 1); | |
1659 | num_entries = min(num_entries, opt->max_readdir); | |
1660 | ||
1661 | order = get_order(size * num_entries); | |
1662 | while (order >= 0) { | |
2a5beea3 YZ |
1663 | rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL | |
1664 | __GFP_NOWARN, | |
1665 | order); | |
1666 | if (rinfo->dir_entries) | |
54008399 YZ |
1667 | break; |
1668 | order--; | |
1669 | } | |
2a5beea3 | 1670 | if (!rinfo->dir_entries) |
54008399 YZ |
1671 | return -ENOMEM; |
1672 | ||
1673 | num_entries = (PAGE_SIZE << order) / size; | |
1674 | num_entries = min(num_entries, opt->max_readdir); | |
1675 | ||
1676 | rinfo->dir_buf_size = PAGE_SIZE << order; | |
1677 | req->r_num_caps = num_entries + 1; | |
1678 | req->r_args.readdir.max_entries = cpu_to_le32(num_entries); | |
1679 | req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); | |
1680 | return 0; | |
1681 | } | |
1682 | ||
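/*
 * A worked example of the sizing above, assuming PAGE_SIZE == 4096 and
 * (purely for illustration) sizeof(struct ceph_mds_reply_dir_entry) == 64:
 * a directory with i_files + i_subdirs == 1000 needs 64000 bytes, so
 * order == get_order(64000) == 4, i.e. 16 pages. If that allocation
 * succeeds, num_entries becomes (4096 << 4) / 64 == 1024 and is then
 * clamped to opt->max_readdir. Each failed allocation drops the order
 * by one, so under memory pressure we quietly fall back to smaller
 * readdir batches instead of failing outright.
 */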
2f2dc053 SW |
1683 | /* |
1684 | * Create an mds request. | |
1685 | */ | |
1686 | struct ceph_mds_request * | |
1687 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |
1688 | { | |
1689 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | |
1690 | ||
1691 | if (!req) | |
1692 | return ERR_PTR(-ENOMEM); | |
1693 | ||
b4556396 | 1694 | mutex_init(&req->r_fill_mutex); |
37151668 | 1695 | req->r_mdsc = mdsc; |
2f2dc053 SW |
1696 | req->r_started = jiffies; |
1697 | req->r_resend_mds = -1; | |
1698 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | |
68cd5b4b | 1699 | INIT_LIST_HEAD(&req->r_unsafe_target_item); |
2f2dc053 | 1700 | req->r_fmode = -1; |
153c8e6b | 1701 | kref_init(&req->r_kref); |
fcd00b68 | 1702 | RB_CLEAR_NODE(&req->r_node); |
2f2dc053 SW |
1703 | INIT_LIST_HEAD(&req->r_wait); |
1704 | init_completion(&req->r_completion); | |
1705 | init_completion(&req->r_safe_completion); | |
1706 | INIT_LIST_HEAD(&req->r_unsafe_item); | |
1707 | ||
56199016 | 1708 | req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); |
b8e69066 | 1709 | |
2f2dc053 SW |
1710 | req->r_op = op; |
1711 | req->r_direct_mode = mode; | |
1712 | return req; | |
1713 | } | |
1714 | ||
1715 | /* | |
44ca18f2 | 1716 | * return the oldest (lowest-tid) request in the request tree; NULL (or tid 0) if none. | |
2f2dc053 SW |
1717 | * |
1718 | * called under mdsc->mutex. | |
1719 | */ | |
44ca18f2 SW |
1720 | static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) |
1721 | { | |
1722 | if (RB_EMPTY_ROOT(&mdsc->request_tree)) | |
1723 | return NULL; | |
1724 | return rb_entry(rb_first(&mdsc->request_tree), | |
1725 | struct ceph_mds_request, r_node); | |
1726 | } | |
1727 | ||
e8a7b8b1 | 1728 | static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) |
2f2dc053 | 1729 | { |
e8a7b8b1 | 1730 | return mdsc->oldest_tid; |
2f2dc053 SW |
1731 | } |
1732 | ||
1733 | /* | |
1734 | * Build a dentry's path. Allocate on heap; caller must kfree. Based | |
1735 | * on build_path_from_dentry in fs/cifs/dir.c. | |
1736 | * | |
1737 | * If @stop_on_nosnap, generate path relative to the first non-snapped | |
1738 | * inode. | |
1739 | * | |
1740 | * Encode hidden .snap dirs as a double /, i.e. | |
1741 | * foo/.snap/bar -> foo//bar | |
1742 | */ | |
1743 | char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | |
1744 | int stop_on_nosnap) | |
1745 | { | |
1746 | struct dentry *temp; | |
1747 | char *path; | |
1748 | int len, pos; | |
1b71fe2e | 1749 | unsigned seq; |
2f2dc053 SW |
1750 | |
1751 | if (dentry == NULL) | |
1752 | return ERR_PTR(-EINVAL); | |
1753 | ||
1754 | retry: | |
1755 | len = 0; | |
1b71fe2e AV |
1756 | seq = read_seqbegin(&rename_lock); |
1757 | rcu_read_lock(); | |
2f2dc053 | 1758 | for (temp = dentry; !IS_ROOT(temp);) { |
2b0143b5 | 1759 | struct inode *inode = d_inode(temp); |
2f2dc053 SW |
1760 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) |
1761 | len++; /* slash only */ | |
1762 | else if (stop_on_nosnap && inode && | |
1763 | ceph_snap(inode) == CEPH_NOSNAP) | |
1764 | break; | |
1765 | else | |
1766 | len += 1 + temp->d_name.len; | |
1767 | temp = temp->d_parent; | |
2f2dc053 | 1768 | } |
1b71fe2e | 1769 | rcu_read_unlock(); |
2f2dc053 SW |
1770 | if (len) |
1771 | len--; /* no leading '/' */ | |
1772 | ||
1773 | path = kmalloc(len+1, GFP_NOFS); | |
1774 | if (path == NULL) | |
1775 | return ERR_PTR(-ENOMEM); | |
1776 | pos = len; | |
1777 | path[pos] = 0; /* trailing null */ | |
1b71fe2e | 1778 | rcu_read_lock(); |
2f2dc053 | 1779 | for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { |
1b71fe2e | 1780 | struct inode *inode; |
2f2dc053 | 1781 | |
1b71fe2e | 1782 | spin_lock(&temp->d_lock); |
2b0143b5 | 1783 | inode = d_inode(temp); |
2f2dc053 | 1784 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { |
104648ad | 1785 | dout("build_path path+%d: %p SNAPDIR\n", |
2f2dc053 SW |
1786 | pos, temp); |
1787 | } else if (stop_on_nosnap && inode && | |
1788 | ceph_snap(inode) == CEPH_NOSNAP) { | |
9d5a09e6 | 1789 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1790 | break; |
1791 | } else { | |
1792 | pos -= temp->d_name.len; | |
1b71fe2e AV |
1793 | if (pos < 0) { |
1794 | spin_unlock(&temp->d_lock); | |
2f2dc053 | 1795 | break; |
1b71fe2e | 1796 | } |
2f2dc053 SW |
1797 | strncpy(path + pos, temp->d_name.name, |
1798 | temp->d_name.len); | |
2f2dc053 | 1799 | } |
1b71fe2e | 1800 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1801 | if (pos) |
1802 | path[--pos] = '/'; | |
1803 | temp = temp->d_parent; | |
2f2dc053 | 1804 | } |
1b71fe2e AV |
1805 | rcu_read_unlock(); |
1806 | if (pos != 0 || read_seqretry(&rename_lock, seq)) { | |
104648ad | 1807 | pr_err("build_path did not end path lookup where " |
2f2dc053 SW |
1808 | "expected, namelen is %d, pos is %d\n", len, pos); |
1809 | /* presumably this is only possible if racing with a | |
1810 | rename of one of the parent directories (we cannot | |
1811 | lock the dentries above us to prevent this, but | |
1812 | retrying should be harmless) */ | |
1813 | kfree(path); | |
1814 | goto retry; | |
1815 | } | |
1816 | ||
2b0143b5 | 1817 | *base = ceph_ino(d_inode(temp)); |
2f2dc053 | 1818 | *plen = len; |
104648ad | 1819 | dout("build_path on %p %d built %llx '%.*s'\n", |
84d08fa8 | 1820 | dentry, d_count(dentry), *base, len, path); |
2f2dc053 SW |
1821 | return path; |
1822 | } | |
1823 | ||
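/*
 * The retry above is the standard rename_lock seqlock pattern, in
 * skeleton form (a sketch, not a drop-in replacement):
 *
 *	unsigned seq;
 * retry:
 *	seq = read_seqbegin(&rename_lock);
 *	// walk temp = temp->d_parent toward the root, taking
 *	// temp->d_lock around each d_name access
 *	if (pos != 0 || read_seqretry(&rename_lock, seq))
 *		goto retry;	// a concurrent rename moved a parent
 *
 * d_move() can rearrange the ancestry at any moment, so only the final
 * seqlock check proves the assembled path was self-consistent.
 */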
fd36a717 | 1824 | static int build_dentry_path(struct dentry *dentry, struct inode *dir, |
2f2dc053 SW |
1825 | const char **ppath, int *ppathlen, u64 *pino, |
1826 | int *pfreepath) | |
1827 | { | |
1828 | char *path; | |
1829 | ||
c6b0b656 | 1830 | rcu_read_lock(); |
fd36a717 JL |
1831 | if (!dir) |
1832 | dir = d_inode_rcu(dentry->d_parent); | |
c6b0b656 JL |
1833 | if (dir && ceph_snap(dir) == CEPH_NOSNAP) { |
1834 | *pino = ceph_ino(dir); | |
1835 | rcu_read_unlock(); | |
2f2dc053 SW |
1836 | *ppath = dentry->d_name.name; |
1837 | *ppathlen = dentry->d_name.len; | |
1838 | return 0; | |
1839 | } | |
c6b0b656 | 1840 | rcu_read_unlock(); |
2f2dc053 SW |
1841 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); |
1842 | if (IS_ERR(path)) | |
1843 | return PTR_ERR(path); | |
1844 | *ppath = path; | |
1845 | *pfreepath = 1; | |
1846 | return 0; | |
1847 | } | |
1848 | ||
1849 | static int build_inode_path(struct inode *inode, | |
1850 | const char **ppath, int *ppathlen, u64 *pino, | |
1851 | int *pfreepath) | |
1852 | { | |
1853 | struct dentry *dentry; | |
1854 | char *path; | |
1855 | ||
1856 | if (ceph_snap(inode) == CEPH_NOSNAP) { | |
1857 | *pino = ceph_ino(inode); | |
1858 | *ppathlen = 0; | |
1859 | return 0; | |
1860 | } | |
1861 | dentry = d_find_alias(inode); | |
1862 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1863 | dput(dentry); | |
1864 | if (IS_ERR(path)) | |
1865 | return PTR_ERR(path); | |
1866 | *ppath = path; | |
1867 | *pfreepath = 1; | |
1868 | return 0; | |
1869 | } | |
1870 | ||
1871 | /* | |
1872 | * request arguments may be specified via an inode *, a dentry *, or | |
1873 | * an explicit ino+path. | |
1874 | */ | |
1875 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |
fd36a717 JL |
1876 | struct inode *rdiri, const char *rpath, |
1877 | u64 rino, const char **ppath, int *pathlen, | |
2f2dc053 SW |
1878 | u64 *ino, int *freepath) |
1879 | { | |
1880 | int r = 0; | |
1881 | ||
1882 | if (rinode) { | |
1883 | r = build_inode_path(rinode, ppath, pathlen, ino, freepath); | |
1884 | dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), | |
1885 | ceph_snap(rinode)); | |
1886 | } else if (rdentry) { | |
fd36a717 JL |
1887 | r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, |
1888 | freepath); | |
2f2dc053 SW |
1889 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, |
1890 | *ppath); | |
795858db | 1891 | } else if (rpath || rino) { |
2f2dc053 SW |
1892 | *ino = rino; |
1893 | *ppath = rpath; | |
b000056a | 1894 | *pathlen = rpath ? strlen(rpath) : 0; |
2f2dc053 SW |
1895 | dout(" path %.*s\n", *pathlen, rpath); |
1896 | } | |
1897 | ||
1898 | return r; | |
1899 | } | |
1900 | ||
1901 | /* | |
1902 | * called under mdsc->mutex | |
1903 | */ | |
1904 | static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |
1905 | struct ceph_mds_request *req, | |
6e6f0923 | 1906 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
1907 | { |
1908 | struct ceph_msg *msg; | |
1909 | struct ceph_mds_request_head *head; | |
1910 | const char *path1 = NULL; | |
1911 | const char *path2 = NULL; | |
1912 | u64 ino1 = 0, ino2 = 0; | |
1913 | int pathlen1 = 0, pathlen2 = 0; | |
1914 | int freepath1 = 0, freepath2 = 0; | |
1915 | int len; | |
1916 | u16 releases; | |
1917 | void *p, *end; | |
1918 | int ret; | |
1919 | ||
1920 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | |
3dd69aab | 1921 | req->r_parent, req->r_path1, req->r_ino1.ino, |
2f2dc053 SW |
1922 | &path1, &pathlen1, &ino1, &freepath1); |
1923 | if (ret < 0) { | |
1924 | msg = ERR_PTR(ret); | |
1925 | goto out; | |
1926 | } | |
1927 | ||
1928 | ret = set_request_path_attr(NULL, req->r_old_dentry, | |
fd36a717 | 1929 | req->r_old_dentry_dir, |
2f2dc053 SW |
1930 | req->r_path2, req->r_ino2.ino, |
1931 | &path2, &pathlen2, &ino2, &freepath2); | |
1932 | if (ret < 0) { | |
1933 | msg = ERR_PTR(ret); | |
1934 | goto out_free1; | |
1935 | } | |
1936 | ||
1937 | len = sizeof(*head) + | |
b8e69066 | 1938 | pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + |
777d738a | 1939 | sizeof(struct ceph_timespec); |
2f2dc053 SW |
1940 | |
1941 | /* calculate (max) length for cap releases */ | |
1942 | len += sizeof(struct ceph_mds_request_release) * | |
1943 | (!!req->r_inode_drop + !!req->r_dentry_drop + | |
1944 | !!req->r_old_inode_drop + !!req->r_old_dentry_drop); | |
1945 | if (req->r_dentry_drop) | |
1946 | len += req->r_dentry->d_name.len; | |
1947 | if (req->r_old_dentry_drop) | |
1948 | len += req->r_old_dentry->d_name.len; | |
1949 | ||
b61c2763 | 1950 | msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f2 SW |
1951 | if (!msg) { |
1952 | msg = ERR_PTR(-ENOMEM); | |
2f2dc053 | 1953 | goto out_free2; |
a79832f2 | 1954 | } |
2f2dc053 | 1955 | |
7cfa0313 | 1956 | msg->hdr.version = cpu_to_le16(2); |
6df058c0 SW |
1957 | msg->hdr.tid = cpu_to_le64(req->r_tid); |
1958 | ||
2f2dc053 SW |
1959 | head = msg->front.iov_base; |
1960 | p = msg->front.iov_base + sizeof(*head); | |
1961 | end = msg->front.iov_base + msg->front.iov_len; | |
1962 | ||
1963 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | |
1964 | head->op = cpu_to_le32(req->r_op); | |
ff3d0046 EB |
1965 | head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); |
1966 | head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); | |
2f2dc053 SW |
1967 | head->args = req->r_args; |
1968 | ||
1969 | ceph_encode_filepath(&p, end, ino1, path1); | |
1970 | ceph_encode_filepath(&p, end, ino2, path2); | |
1971 | ||
e979cf50 SW |
1972 | /* make note of release offset, in case we need to replay */ |
1973 | req->r_request_release_offset = p - msg->front.iov_base; | |
1974 | ||
2f2dc053 SW |
1975 | /* cap releases */ |
1976 | releases = 0; | |
1977 | if (req->r_inode_drop) | |
1978 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 1979 | req->r_inode ? req->r_inode : d_inode(req->r_dentry), |
2f2dc053 SW |
1980 | mds, req->r_inode_drop, req->r_inode_unless, 0); |
1981 | if (req->r_dentry_drop) | |
1982 | releases += ceph_encode_dentry_release(&p, req->r_dentry, | |
3dd69aab | 1983 | req->r_parent, mds, req->r_dentry_drop, |
ca6c8ae0 | 1984 | req->r_dentry_unless); |
2f2dc053 SW |
1985 | if (req->r_old_dentry_drop) |
1986 | releases += ceph_encode_dentry_release(&p, req->r_old_dentry, | |
ca6c8ae0 JL |
1987 | req->r_old_dentry_dir, mds, |
1988 | req->r_old_dentry_drop, | |
1989 | req->r_old_dentry_unless); | |
2f2dc053 SW |
1990 | if (req->r_old_inode_drop) |
1991 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 1992 | d_inode(req->r_old_dentry), |
2f2dc053 | 1993 | mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); |
6e6f0923 YZ |
1994 | |
1995 | if (drop_cap_releases) { | |
1996 | releases = 0; | |
1997 | p = msg->front.iov_base + req->r_request_release_offset; | |
1998 | } | |
1999 | ||
2f2dc053 SW |
2000 | head->num_releases = cpu_to_le16(releases); |
2001 | ||
b8e69066 | 2002 | /* time stamp */ |
1f041a89 YZ |
2003 | { |
2004 | struct ceph_timespec ts; | |
2005 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2006 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2007 | } | |
b8e69066 | 2008 | |
2f2dc053 SW |
2009 | BUG_ON(p > end); |
2010 | msg->front.iov_len = p - msg->front.iov_base; | |
2011 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
2012 | ||
25e6bae3 YZ |
2013 | if (req->r_pagelist) { |
2014 | struct ceph_pagelist *pagelist = req->r_pagelist; | |
0e1a5ee6 | 2015 | refcount_inc(&pagelist->refcnt); |
25e6bae3 YZ |
2016 | ceph_msg_data_add_pagelist(msg, pagelist); |
2017 | msg->hdr.data_len = cpu_to_le32(pagelist->length); | |
2018 | } else { | |
2019 | msg->hdr.data_len = 0; | |
ebf18f47 | 2020 | } |
02afca6c | 2021 | |
2f2dc053 SW |
2022 | msg->hdr.data_off = cpu_to_le16(0); |
2023 | ||
2024 | out_free2: | |
2025 | if (freepath2) | |
2026 | kfree((char *)path2); | |
2027 | out_free1: | |
2028 | if (freepath1) | |
2029 | kfree((char *)path1); | |
2030 | out: | |
2031 | return msg; | |
2032 | } | |
2033 | ||
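/*
 * Layout of the request front assembled above, in order:
 *
 *	struct ceph_mds_request_head head;
 *	filepath 1: version byte, u64 ino1, length-prefixed path1
 *	filepath 2: version byte, u64 ino2, length-prefixed path2
 *	cap/dentry releases (start offset saved in r_request_release_offset)
 *	struct ceph_timespec stamp;	// r_stamp
 *
 * The saved release offset is what lets a replay rewind past the
 * releases; see the replay branch of __prepare_send_request() below.
 */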
2034 | /* | |
2035 | * called under mdsc->mutex if error, under no mutex if | |
2036 | * success. | |
2037 | */ | |
2038 | static void complete_request(struct ceph_mds_client *mdsc, | |
2039 | struct ceph_mds_request *req) | |
2040 | { | |
2041 | if (req->r_callback) | |
2042 | req->r_callback(mdsc, req); | |
2043 | else | |
03066f23 | 2044 | complete_all(&req->r_completion); |
2f2dc053 SW |
2045 | } |
2046 | ||
2047 | /* | |
2048 | * called under mdsc->mutex | |
2049 | */ | |
2050 | static int __prepare_send_request(struct ceph_mds_client *mdsc, | |
2051 | struct ceph_mds_request *req, | |
6e6f0923 | 2052 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2053 | { |
2054 | struct ceph_mds_request_head *rhead; | |
2055 | struct ceph_msg *msg; | |
2056 | int flags = 0; | |
2057 | ||
2f2dc053 | 2058 | req->r_attempts++; |
e55b71f8 GF |
2059 | if (req->r_inode) { |
2060 | struct ceph_cap *cap = | |
2061 | ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); | |
2062 | ||
2063 | if (cap) | |
2064 | req->r_sent_on_mseq = cap->mseq; | |
2065 | else | |
2066 | req->r_sent_on_mseq = -1; | |
2067 | } | |
2f2dc053 SW |
2068 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
2069 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | |
2070 | ||
bc2de10d | 2071 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
c5c9a0bf | 2072 | void *p; |
01a92f17 SW |
2073 | /* |
2074 | * Replay. Do not regenerate message (and rebuild | |
2075 | * paths, etc.); just use the original message. | |
2076 | * Rebuilding paths will break for renames because | |
2077 | * d_move mangles the src name. | |
2078 | */ | |
2079 | msg = req->r_request; | |
2080 | rhead = msg->front.iov_base; | |
2081 | ||
2082 | flags = le32_to_cpu(rhead->flags); | |
2083 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2084 | rhead->flags = cpu_to_le32(flags); | |
2085 | ||
2086 | if (req->r_target_inode) | |
2087 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | |
2088 | ||
2089 | rhead->num_retry = req->r_attempts - 1; | |
e979cf50 SW |
2090 | |
2091 | /* remove cap/dentry releases from message */ | |
2092 | rhead->num_releases = 0; | |
c5c9a0bf YZ |
2093 | |
2094 | /* time stamp */ | |
2095 | p = msg->front.iov_base + req->r_request_release_offset; | |
1f041a89 YZ |
2096 | { |
2097 | struct ceph_timespec ts; | |
2098 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2099 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2100 | } | |
c5c9a0bf YZ |
2101 | |
2102 | msg->front.iov_len = p - msg->front.iov_base; | |
2103 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
01a92f17 SW |
2104 | return 0; |
2105 | } | |
2106 | ||
2f2dc053 SW |
2107 | if (req->r_request) { |
2108 | ceph_msg_put(req->r_request); | |
2109 | req->r_request = NULL; | |
2110 | } | |
6e6f0923 | 2111 | msg = create_request_message(mdsc, req, mds, drop_cap_releases); |
2f2dc053 | 2112 | if (IS_ERR(msg)) { |
e1518c7c | 2113 | req->r_err = PTR_ERR(msg); |
a79832f2 | 2114 | return PTR_ERR(msg); |
2f2dc053 SW |
2115 | } |
2116 | req->r_request = msg; | |
2117 | ||
2118 | rhead = msg->front.iov_base; | |
2f2dc053 | 2119 | rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
bc2de10d | 2120 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
2f2dc053 | 2121 | flags |= CEPH_MDS_FLAG_REPLAY; |
3dd69aab | 2122 | if (req->r_parent) |
2f2dc053 SW |
2123 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; |
2124 | rhead->flags = cpu_to_le32(flags); | |
2125 | rhead->num_fwd = req->r_num_fwd; | |
2126 | rhead->num_retry = req->r_attempts - 1; | |
01a92f17 | 2127 | rhead->ino = 0; |
2f2dc053 | 2128 | |
3dd69aab | 2129 | dout(" r_parent = %p\n", req->r_parent); |
2f2dc053 SW |
2130 | return 0; |
2131 | } | |
2132 | ||
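/*
 * Two cases above: a replay reuses the original message verbatim, only
 * patching flags, retry count, target ino, and the timestamp in place
 * (rebuilding paths would break renames, as noted), while a normal
 * (re)send constructs a fresh message. oldest_client_tid tells the MDS
 * the lowest tid we might still replay, so it can prune its record of
 * our completed requests below that point.
 */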
2133 | /* | |
2134 | * send request, or put it on the appropriate wait list. | |
2135 | */ | |
2136 | static int __do_request(struct ceph_mds_client *mdsc, | |
2137 | struct ceph_mds_request *req) | |
2138 | { | |
2139 | struct ceph_mds_session *session = NULL; | |
2140 | int mds = -1; | |
48fec5d0 | 2141 | int err = 0; |
2f2dc053 | 2142 | |
bc2de10d JL |
2143 | if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
2144 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) | |
eb1b8af3 | 2145 | __unregister_request(mdsc, req); |
2f2dc053 | 2146 | goto out; |
eb1b8af3 | 2147 | } |
2f2dc053 SW |
2148 | |
2149 | if (req->r_timeout && | |
2150 | time_after_eq(jiffies, req->r_started + req->r_timeout)) { | |
2151 | dout("do_request timed out\n"); | |
2152 | err = -EIO; | |
2153 | goto finish; | |
2154 | } | |
52953d55 | 2155 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { |
48fec5d0 YZ |
2156 | dout("do_request forced umount\n"); |
2157 | err = -EIO; | |
2158 | goto finish; | |
2159 | } | |
52953d55 | 2160 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { |
e9e427f0 YZ |
2161 | if (mdsc->mdsmap_err) { |
2162 | err = mdsc->mdsmap_err; | |
2163 | dout("do_request mdsmap err %d\n", err); | |
2164 | goto finish; | |
2165 | } | |
cc8e8342 YZ |
2166 | if (mdsc->mdsmap->m_epoch == 0) { |
2167 | dout("do_request no mdsmap, waiting for map\n"); | |
2168 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2169 | goto finish; | |
2170 | } | |
e9e427f0 YZ |
2171 | if (!(mdsc->fsc->mount_options->flags & |
2172 | CEPH_MOUNT_OPT_MOUNTWAIT) && | |
2173 | !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { | |
2174 | err = -ENOENT; | |
2175 | pr_info("probably no mds server is up\n"); | |
2176 | goto finish; | |
2177 | } | |
2178 | } | |
2f2dc053 | 2179 | |
dc69e2e9 SW |
2180 | put_request_session(req); |
2181 | ||
2f2dc053 SW |
2182 | mds = __choose_mds(mdsc, req); |
2183 | if (mds < 0 || | |
2184 | ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { | |
2185 | dout("do_request no mds or not active, waiting for map\n"); | |
2186 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2187 | goto out; | |
2188 | } | |
2189 | ||
2190 | /* get, open session */ | |
2191 | session = __ceph_lookup_mds_session(mdsc, mds); | |
9c423956 | 2192 | if (!session) { |
2f2dc053 | 2193 | session = register_session(mdsc, mds); |
9c423956 SW |
2194 | if (IS_ERR(session)) { |
2195 | err = PTR_ERR(session); | |
2196 | goto finish; | |
2197 | } | |
2198 | } | |
dc69e2e9 SW |
2199 | req->r_session = get_session(session); |
2200 | ||
2f2dc053 | 2201 | dout("do_request mds%d session %p state %s\n", mds, session, |
a687ecaf | 2202 | ceph_session_state_name(session->s_state)); |
2f2dc053 SW |
2203 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
2204 | session->s_state != CEPH_MDS_SESSION_HUNG) { | |
fcff415c YZ |
2205 | if (session->s_state == CEPH_MDS_SESSION_REJECTED) { |
2206 | err = -EACCES; | |
2207 | goto out_session; | |
2208 | } | |
2f2dc053 SW |
2209 | if (session->s_state == CEPH_MDS_SESSION_NEW || |
2210 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
2211 | __open_session(mdsc, session); | |
2212 | list_add(&req->r_wait, &session->s_waiting); | |
2213 | goto out_session; | |
2214 | } | |
2215 | ||
2216 | /* send request */ | |
2f2dc053 SW |
2217 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
2218 | ||
2219 | if (req->r_request_started == 0) /* note request start time */ | |
2220 | req->r_request_started = jiffies; | |
2221 | ||
6e6f0923 | 2222 | err = __prepare_send_request(mdsc, req, mds, false); |
2f2dc053 SW |
2223 | if (!err) { |
2224 | ceph_msg_get(req->r_request); | |
2225 | ceph_con_send(&session->s_con, req->r_request); | |
2226 | } | |
2227 | ||
2228 | out_session: | |
2229 | ceph_put_mds_session(session); | |
48fec5d0 YZ |
2230 | finish: |
2231 | if (err) { | |
2232 | dout("__do_request early error %d\n", err); | |
2233 | req->r_err = err; | |
2234 | complete_request(mdsc, req); | |
2235 | __unregister_request(mdsc, req); | |
2236 | } | |
2f2dc053 SW |
2237 | out: |
2238 | return err; | |
2f2dc053 SW |
2239 | } |
2240 | ||
2241 | /* | |
2242 | * called under mdsc->mutex | |
2243 | */ | |
2244 | static void __wake_requests(struct ceph_mds_client *mdsc, | |
2245 | struct list_head *head) | |
2246 | { | |
ed75ec2c YZ |
2247 | struct ceph_mds_request *req; |
2248 | LIST_HEAD(tmp_list); | |
2249 | ||
2250 | list_splice_init(head, &tmp_list); | |
2f2dc053 | 2251 | |
ed75ec2c YZ |
2252 | while (!list_empty(&tmp_list)) { |
2253 | req = list_entry(tmp_list.next, | |
2254 | struct ceph_mds_request, r_wait); | |
2f2dc053 | 2255 | list_del_init(&req->r_wait); |
7971bd92 | 2256 | dout(" wake request %p tid %llu\n", req, req->r_tid); |
2f2dc053 SW |
2257 | __do_request(mdsc, req); |
2258 | } | |
2259 | } | |
2260 | ||
2261 | /* | |
2262 | * Wake up threads with requests pending for @mds, so that they can | |
29790f26 | 2263 | * resubmit their requests to a possibly different mds. |
2f2dc053 | 2264 | */ |
29790f26 | 2265 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc053 | 2266 | { |
44ca18f2 | 2267 | struct ceph_mds_request *req; |
282c1052 | 2268 | struct rb_node *p = rb_first(&mdsc->request_tree); |
2f2dc053 SW |
2269 | |
2270 | dout("kick_requests mds%d\n", mds); | |
282c1052 | 2271 | while (p) { |
44ca18f2 | 2272 | req = rb_entry(p, struct ceph_mds_request, r_node); |
282c1052 | 2273 | p = rb_next(p); |
bc2de10d | 2274 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
44ca18f2 | 2275 | continue; |
3de22be6 YZ |
2276 | if (req->r_attempts > 0) |
2277 | continue; /* only new requests */ | |
44ca18f2 SW |
2278 | if (req->r_session && |
2279 | req->r_session->s_mds == mds) { | |
2280 | dout(" kicking tid %llu\n", req->r_tid); | |
03974e81 | 2281 | list_del_init(&req->r_wait); |
44ca18f2 | 2282 | __do_request(mdsc, req); |
2f2dc053 SW |
2283 | } |
2284 | } | |
2285 | } | |
2286 | ||
2287 | void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, | |
2288 | struct ceph_mds_request *req) | |
2289 | { | |
2290 | dout("submit_request on %p\n", req); | |
2291 | mutex_lock(&mdsc->mutex); | |
2292 | __register_request(mdsc, req, NULL); | |
2293 | __do_request(mdsc, req); | |
2294 | mutex_unlock(&mdsc->mutex); | |
2295 | } | |
2296 | ||
2297 | /* | |
2298 | * Synchronously perform an mds request. Take care of all of the | |
2299 | * session setup, forwarding, retry details. | |
2300 | */ | |
2301 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, | |
2302 | struct inode *dir, | |
2303 | struct ceph_mds_request *req) | |
2304 | { | |
2305 | int err; | |
2306 | ||
2307 | dout("do_request on %p\n", req); | |
2308 | ||
3dd69aab | 2309 | /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ |
2f2dc053 SW |
2310 | if (req->r_inode) |
2311 | ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); | |
3dd69aab JL |
2312 | if (req->r_parent) |
2313 | ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); | |
844d87c3 | 2314 | if (req->r_old_dentry_dir) |
41b02e1f SW |
2315 | ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), |
2316 | CEPH_CAP_PIN); | |
2f2dc053 SW |
2317 | |
2318 | /* issue */ | |
2319 | mutex_lock(&mdsc->mutex); | |
2320 | __register_request(mdsc, req, dir); | |
2321 | __do_request(mdsc, req); | |
2322 | ||
e1518c7c SW |
2323 | if (req->r_err) { |
2324 | err = req->r_err; | |
e1518c7c | 2325 | goto out; |
2f2dc053 SW |
2326 | } |
2327 | ||
e1518c7c SW |
2328 | /* wait */ |
2329 | mutex_unlock(&mdsc->mutex); | |
2330 | dout("do_request waiting\n"); | |
5be73034 | 2331 | if (!req->r_timeout && req->r_wait_for_completion) { |
9280be24 | 2332 | err = req->r_wait_for_completion(mdsc, req); |
e1518c7c | 2333 | } else { |
5be73034 ID |
2334 | long timeleft = wait_for_completion_killable_timeout( |
2335 | &req->r_completion, | |
2336 | ceph_timeout_jiffies(req->r_timeout)); | |
2337 | if (timeleft > 0) | |
2338 | err = 0; | |
2339 | else if (!timeleft) | |
2340 | err = -EIO; /* timed out */ | |
2341 | else | |
2342 | err = timeleft; /* killed */ | |
e1518c7c SW |
2343 | } |
2344 | dout("do_request waited, got %d\n", err); | |
2345 | mutex_lock(&mdsc->mutex); | |
5b1daecd | 2346 | |
e1518c7c | 2347 | /* only abort if we didn't race with a real reply */ |
bc2de10d | 2348 | if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
e1518c7c SW |
2349 | err = le32_to_cpu(req->r_reply_info.head->result); |
2350 | } else if (err < 0) { | |
2351 | dout("aborted request %lld with %d\n", req->r_tid, err); | |
b4556396 SW |
2352 | |
2353 | /* | |
2354 | * ensure we aren't running concurrently with | |
2355 | * ceph_fill_trace or ceph_readdir_prepopulate, which | |
2356 | * rely on locks (dir mutex) held by our caller. | |
2357 | */ | |
2358 | mutex_lock(&req->r_fill_mutex); | |
e1518c7c | 2359 | req->r_err = err; |
bc2de10d | 2360 | set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); |
b4556396 | 2361 | mutex_unlock(&req->r_fill_mutex); |
5b1daecd | 2362 | |
3dd69aab | 2363 | if (req->r_parent && |
167c9e35 SW |
2364 | (req->r_op & CEPH_MDS_OP_WRITE)) |
2365 | ceph_invalidate_dir_request(req); | |
2f2dc053 | 2366 | } else { |
e1518c7c | 2367 | err = req->r_err; |
2f2dc053 | 2368 | } |
2f2dc053 | 2369 | |
e1518c7c SW |
2370 | out: |
2371 | mutex_unlock(&mdsc->mutex); | |
2f2dc053 SW |
2372 | dout("do_request %p done, result %d\n", req, err); |
2373 | return err; | |
2374 | } | |
2375 | ||
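/*
 * A minimal caller-side sketch (illustrative only; which fields a real
 * caller fills in depends on the operation):
 *
 *	struct ceph_mds_request *req;
 *	int err;
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);	// path derived from the dentry
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);	// drop our kref
 */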
167c9e35 | 2376 | /* |
2f276c51 | 2377 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
167c9e35 SW |
2378 | * namespace request. |
2379 | */ | |
2380 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) | |
2381 | { | |
3dd69aab | 2382 | struct inode *inode = req->r_parent; |
167c9e35 | 2383 | |
2f276c51 | 2384 | dout("invalidate_dir_request %p (complete, lease(s))\n", inode); |
167c9e35 | 2385 | |
2f276c51 | 2386 | ceph_dir_clear_complete(inode); |
167c9e35 SW |
2387 | if (req->r_dentry) |
2388 | ceph_invalidate_dentry_lease(req->r_dentry); | |
2389 | if (req->r_old_dentry) | |
2390 | ceph_invalidate_dentry_lease(req->r_old_dentry); | |
2391 | } | |
2392 | ||
2f2dc053 SW |
2393 | /* |
2394 | * Handle mds reply. | |
2395 | * | |
2396 | * We take the session mutex and parse and process the reply immediately. | |
2397 | * This preserves the logical ordering of replies, capabilities, etc., sent | |
2398 | * by the MDS as they are applied to our local cache. | |
2399 | */ | |
2400 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |
2401 | { | |
2402 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2403 | struct ceph_mds_request *req; | |
2404 | struct ceph_mds_reply_head *head = msg->front.iov_base; | |
2405 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ | |
982d6011 | 2406 | struct ceph_snap_realm *realm; |
2f2dc053 SW |
2407 | u64 tid; |
2408 | int err, result; | |
2600d2dd | 2409 | int mds = session->s_mds; |
2f2dc053 | 2410 | |
2f2dc053 SW |
2411 | if (msg->front.iov_len < sizeof(*head)) { |
2412 | pr_err("mdsc_handle_reply got corrupt (short) reply\n"); | |
9ec7cab1 | 2413 | ceph_msg_dump(msg); |
2f2dc053 SW |
2414 | return; |
2415 | } | |
2416 | ||
2417 | /* get request, session */ | |
6df058c0 | 2418 | tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 | 2419 | mutex_lock(&mdsc->mutex); |
fcd00b68 | 2420 | req = lookup_get_request(mdsc, tid); |
2f2dc053 SW |
2421 | if (!req) { |
2422 | dout("handle_reply on unknown tid %llu\n", tid); | |
2423 | mutex_unlock(&mdsc->mutex); | |
2424 | return; | |
2425 | } | |
2426 | dout("handle_reply %p\n", req); | |
2f2dc053 SW |
2427 | |
2428 | /* correct session? */ | |
d96d6049 | 2429 | if (req->r_session != session) { |
2f2dc053 SW |
2430 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
2431 | " not mds%d\n", tid, session->s_mds, | |
2432 | req->r_session ? req->r_session->s_mds : -1); | |
2433 | mutex_unlock(&mdsc->mutex); | |
2434 | goto out; | |
2435 | } | |
2436 | ||
2437 | /* dup? */ | |
bc2de10d JL |
2438 | if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || |
2439 | (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { | |
f3ae1b97 | 2440 | pr_warn("got a dup %s reply on %llu from mds%d\n", |
2f2dc053 SW |
2441 | head->safe ? "safe" : "unsafe", tid, mds); |
2442 | mutex_unlock(&mdsc->mutex); | |
2443 | goto out; | |
2444 | } | |
bc2de10d | 2445 | if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { |
f3ae1b97 | 2446 | pr_warn("got unsafe after safe on %llu from mds%d\n", |
85792d0d SW |
2447 | tid, mds); |
2448 | mutex_unlock(&mdsc->mutex); | |
2449 | goto out; | |
2450 | } | |
2f2dc053 SW |
2451 | |
2452 | result = le32_to_cpu(head->result); | |
2453 | ||
2454 | /* | |
e55b71f8 GF |
2455 | * Handle an ESTALE: |
2456 | * if we're not talking to the authority, send to them; |
2457 | * if the authority has changed while we weren't looking, |
2458 | * send to the new authority. |
2459 | * Otherwise we just have to return an ESTALE. |
2f2dc053 SW |
2460 | */ |
2461 | if (result == -ESTALE) { | |
e55b71f8 | 2462 | dout("got ESTALE on request %llu", req->r_tid); |
51da8e8c | 2463 | req->r_resend_mds = -1; |
ca18bede | 2464 | if (req->r_direct_mode != USE_AUTH_MDS) { |
e55b71f8 GF |
2465 | dout("not using auth, setting for that now"); |
2466 | req->r_direct_mode = USE_AUTH_MDS; | |
2f2dc053 SW |
2467 | __do_request(mdsc, req); |
2468 | mutex_unlock(&mdsc->mutex); | |
2469 | goto out; | |
e55b71f8 | 2470 | } else { |
ca18bede YZ |
2471 | int mds = __choose_mds(mdsc, req); |
2472 | if (mds >= 0 && mds != req->r_session->s_mds) { | |
2473 | dout("but auth changed, so resending"); | |
e55b71f8 GF |
2474 | __do_request(mdsc, req); |
2475 | mutex_unlock(&mdsc->mutex); | |
2476 | goto out; | |
2477 | } | |
2f2dc053 | 2478 | } |
e55b71f8 | 2479 | dout("have to return ESTALE on request %llu", req->r_tid); |
2f2dc053 SW |
2480 | } |
2481 | ||
e55b71f8 | 2482 | |
2f2dc053 | 2483 | if (head->safe) { |
bc2de10d | 2484 | set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); |
2f2dc053 | 2485 | __unregister_request(mdsc, req); |
2f2dc053 | 2486 | |
bc2de10d | 2487 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
2f2dc053 SW |
2488 | /* |
2489 | * We already handled the unsafe response, now do the | |
2490 | * cleanup. No need to examine the response; the MDS | |
2491 | * doesn't include any result info in the safe | |
2492 | * response. And even if it did, there is nothing | |
2493 | * useful we could do with a revised return value. | |
2494 | */ | |
2495 | dout("got safe reply %llu, mds%d\n", tid, mds); | |
2f2dc053 SW |
2496 | |
2497 | /* last unsafe request during umount? */ | |
44ca18f2 | 2498 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f23 | 2499 | complete_all(&mdsc->safe_umount_waiters); |
2f2dc053 SW |
2500 | mutex_unlock(&mdsc->mutex); |
2501 | goto out; | |
2502 | } | |
e1518c7c | 2503 | } else { |
bc2de10d | 2504 | set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); |
2f2dc053 | 2505 | list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); |
4c06ace8 YZ |
2506 | if (req->r_unsafe_dir) { |
2507 | struct ceph_inode_info *ci = | |
2508 | ceph_inode(req->r_unsafe_dir); | |
2509 | spin_lock(&ci->i_unsafe_lock); | |
2510 | list_add_tail(&req->r_unsafe_dir_item, | |
2511 | &ci->i_unsafe_dirops); | |
2512 | spin_unlock(&ci->i_unsafe_lock); | |
2513 | } | |
2f2dc053 SW |
2514 | } |
2515 | ||
2516 | dout("handle_reply tid %lld result %d\n", tid, result); | |
2517 | rinfo = &req->r_reply_info; | |
14303d20 | 2518 | err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc053 SW |
2519 | mutex_unlock(&mdsc->mutex); |
2520 | ||
2521 | mutex_lock(&session->s_mutex); | |
2522 | if (err < 0) { | |
25933abd | 2523 | pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); |
9ec7cab1 | 2524 | ceph_msg_dump(msg); |
2f2dc053 SW |
2525 | goto out_err; |
2526 | } | |
2527 | ||
2528 | /* snap trace */ | |
982d6011 | 2529 | realm = NULL; |
2f2dc053 SW |
2530 | if (rinfo->snapblob_len) { |
2531 | down_write(&mdsc->snap_rwsem); | |
2532 | ceph_update_snap_trace(mdsc, rinfo->snapblob, | |
982d6011 YZ |
2533 | rinfo->snapblob + rinfo->snapblob_len, |
2534 | le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, | |
2535 | &realm); | |
2f2dc053 SW |
2536 | downgrade_write(&mdsc->snap_rwsem); |
2537 | } else { | |
2538 | down_read(&mdsc->snap_rwsem); | |
2539 | } | |
2540 | ||
2541 | /* insert trace into our cache */ | |
b4556396 | 2542 | mutex_lock(&req->r_fill_mutex); |
315f2408 | 2543 | current->journal_info = req; |
f5a03b08 | 2544 | err = ceph_fill_trace(mdsc->fsc->sb, req); |
2f2dc053 | 2545 | if (err == 0) { |
6e8575fa | 2546 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
81c6aea5 | 2547 | req->r_op == CEPH_MDS_OP_LSSNAP)) |
2f2dc053 | 2548 | ceph_readdir_prepopulate(req, req->r_session); |
37151668 | 2549 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc053 | 2550 | } |
315f2408 | 2551 | current->journal_info = NULL; |
b4556396 | 2552 | mutex_unlock(&req->r_fill_mutex); |
2f2dc053 SW |
2553 | |
2554 | up_read(&mdsc->snap_rwsem); | |
982d6011 YZ |
2555 | if (realm) |
2556 | ceph_put_snap_realm(mdsc, realm); | |
68cd5b4b | 2557 | |
bc2de10d JL |
2558 | if (err == 0 && req->r_target_inode && |
2559 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
68cd5b4b YZ |
2560 | struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); |
2561 | spin_lock(&ci->i_unsafe_lock); | |
2562 | list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); | |
2563 | spin_unlock(&ci->i_unsafe_lock); | |
2564 | } | |
2f2dc053 | 2565 | out_err: |
e1518c7c | 2566 | mutex_lock(&mdsc->mutex); |
bc2de10d | 2567 | if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
e1518c7c SW |
2568 | if (err) { |
2569 | req->r_err = err; | |
2570 | } else { | |
5fdb1389 | 2571 | req->r_reply = ceph_msg_get(msg); |
bc2de10d | 2572 | set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); |
e1518c7c | 2573 | } |
2f2dc053 | 2574 | } else { |
e1518c7c | 2575 | dout("reply arrived after request %lld was aborted\n", tid); |
2f2dc053 | 2576 | } |
e1518c7c | 2577 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 2578 | |
2f2dc053 SW |
2579 | mutex_unlock(&session->s_mutex); |
2580 | ||
2581 | /* kick calling process */ | |
2582 | complete_request(mdsc, req); | |
2583 | out: | |
2584 | ceph_mdsc_put_request(req); | |
2585 | return; | |
2586 | } | |
2587 | ||
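/*
 * On safe vs unsafe replies: an unsafe reply means the MDS has applied
 * the operation but not yet journaled it, so the request stays
 * registered (and on the session's s_unsafe list) until the matching
 * safe reply confirms durability; the safe reply carries no new result.
 * That is why unmount waits on safe_umount_waiters above before tearing
 * down sessions.
 */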
2588 | ||
2589 | ||
2590 | /* | |
2591 | * handle mds notification that our request has been forwarded. | |
2592 | */ | |
2600d2dd SW |
2593 | static void handle_forward(struct ceph_mds_client *mdsc, |
2594 | struct ceph_mds_session *session, | |
2595 | struct ceph_msg *msg) | |
2f2dc053 SW |
2596 | { |
2597 | struct ceph_mds_request *req; | |
a1ea787c | 2598 | u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2599 | u32 next_mds; |
2600 | u32 fwd_seq; | |
2f2dc053 SW |
2601 | int err = -EINVAL; |
2602 | void *p = msg->front.iov_base; | |
2603 | void *end = p + msg->front.iov_len; | |
2f2dc053 | 2604 | |
a1ea787c | 2605 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea SW |
2606 | next_mds = ceph_decode_32(&p); |
2607 | fwd_seq = ceph_decode_32(&p); | |
2f2dc053 SW |
2608 | |
2609 | mutex_lock(&mdsc->mutex); | |
fcd00b68 | 2610 | req = lookup_get_request(mdsc, tid); |
2f2dc053 | 2611 | if (!req) { |
2a8e5e36 | 2612 | dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); |
2f2dc053 SW |
2613 | goto out; /* dup reply? */ |
2614 | } | |
2615 | ||
bc2de10d | 2616 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
2a8e5e36 SW |
2617 | dout("forward tid %llu aborted, unregistering\n", tid); |
2618 | __unregister_request(mdsc, req); | |
2619 | } else if (fwd_seq <= req->r_num_fwd) { | |
2620 | dout("forward tid %llu to mds%d - old seq %d <= %d\n", | |
2f2dc053 SW |
2621 | tid, next_mds, req->r_num_fwd, fwd_seq); |
2622 | } else { | |
2623 | /* resend. forward race not possible; mds would drop */ | |
2a8e5e36 SW |
2624 | dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); |
2625 | BUG_ON(req->r_err); | |
bc2de10d | 2626 | BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); |
3de22be6 | 2627 | req->r_attempts = 0; |
2f2dc053 SW |
2628 | req->r_num_fwd = fwd_seq; |
2629 | req->r_resend_mds = next_mds; | |
2630 | put_request_session(req); | |
2631 | __do_request(mdsc, req); | |
2632 | } | |
2633 | ceph_mdsc_put_request(req); | |
2634 | out: | |
2635 | mutex_unlock(&mdsc->mutex); | |
2636 | return; | |
2637 | ||
2638 | bad: | |
2639 | pr_err("mdsc_handle_forward decode error err=%d\n", err); | |
2640 | } | |
2641 | ||
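/*
 * The fwd_seq check above guards against stale or reordered forward
 * notifications: each forwarding hop increments the sequence, so a
 * notification with fwd_seq <= r_num_fwd describes a hop we have
 * already acted on and can be ignored instead of triggering a
 * duplicate resend.
 */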
2642 | /* | |
2643 | * handle an mds session control message | |
2644 | */ | |
2645 | static void handle_session(struct ceph_mds_session *session, | |
2646 | struct ceph_msg *msg) | |
2647 | { | |
2648 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2649 | u32 op; | |
2650 | u64 seq; | |
2600d2dd | 2651 | int mds = session->s_mds; |
2f2dc053 SW |
2652 | struct ceph_mds_session_head *h = msg->front.iov_base; |
2653 | int wake = 0; | |
2654 | ||
2f2dc053 SW |
2655 | /* decode */ |
2656 | if (msg->front.iov_len != sizeof(*h)) | |
2657 | goto bad; | |
2658 | op = le32_to_cpu(h->op); | |
2659 | seq = le64_to_cpu(h->seq); | |
2660 | ||
2661 | mutex_lock(&mdsc->mutex); | |
0a07fc8c YZ |
2662 | if (op == CEPH_SESSION_CLOSE) { |
2663 | get_session(session); | |
2600d2dd | 2664 | __unregister_session(mdsc, session); |
0a07fc8c | 2665 | } |
2f2dc053 SW |
2666 | /* FIXME: this ttl calculation is generous */ |
2667 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; | |
2668 | mutex_unlock(&mdsc->mutex); | |
2669 | ||
2670 | mutex_lock(&session->s_mutex); | |
2671 | ||
2672 | dout("handle_session mds%d %s %p state %s seq %llu\n", | |
2673 | mds, ceph_session_op_name(op), session, | |
a687ecaf | 2674 | ceph_session_state_name(session->s_state), seq); |
2f2dc053 SW |
2675 | |
2676 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { | |
2677 | session->s_state = CEPH_MDS_SESSION_OPEN; | |
2678 | pr_info("mds%d came back\n", session->s_mds); | |
2679 | } | |
2680 | ||
2681 | switch (op) { | |
2682 | case CEPH_SESSION_OPEN: | |
29790f26 SW |
2683 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2684 | pr_info("mds%d reconnect success\n", session->s_mds); | |
2f2dc053 SW |
2685 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2686 | renewed_caps(mdsc, session, 0); | |
2687 | wake = 1; | |
2688 | if (mdsc->stopping) | |
2689 | __close_session(mdsc, session); | |
2690 | break; | |
2691 | ||
2692 | case CEPH_SESSION_RENEWCAPS: | |
2693 | if (session->s_renew_seq == seq) | |
2694 | renewed_caps(mdsc, session, 1); | |
2695 | break; | |
2696 | ||
2697 | case CEPH_SESSION_CLOSE: | |
29790f26 SW |
2698 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2699 | pr_info("mds%d reconnect denied\n", session->s_mds); | |
1c841a96 | 2700 | cleanup_session_requests(mdsc, session); |
2f2dc053 | 2701 | remove_session_caps(session); |
656e4382 | 2702 | wake = 2; /* for good measure */ |
f3c60c59 | 2703 | wake_up_all(&mdsc->session_close_wq); |
2f2dc053 SW |
2704 | break; |
2705 | ||
2706 | case CEPH_SESSION_STALE: | |
2707 | pr_info("mds%d caps went stale, renewing\n", | |
2708 | session->s_mds); | |
d8fb02ab | 2709 | spin_lock(&session->s_gen_ttl_lock); |
2f2dc053 | 2710 | session->s_cap_gen++; |
1ce208a6 | 2711 | session->s_cap_ttl = jiffies - 1; |
d8fb02ab | 2712 | spin_unlock(&session->s_gen_ttl_lock); |
2f2dc053 SW |
2713 | send_renew_caps(mdsc, session); |
2714 | break; | |
2715 | ||
2716 | case CEPH_SESSION_RECALL_STATE: | |
2717 | trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); | |
2718 | break; | |
2719 | ||
186e4f7a YZ |
2720 | case CEPH_SESSION_FLUSHMSG: |
2721 | send_flushmsg_ack(mdsc, session, seq); | |
2722 | break; | |
2723 | ||
03f4fcb0 YZ |
2724 | case CEPH_SESSION_FORCE_RO: |
2725 | dout("force_session_readonly %p\n", session); | |
2726 | spin_lock(&session->s_cap_lock); | |
2727 | session->s_readonly = true; | |
2728 | spin_unlock(&session->s_cap_lock); | |
2729 | wake_up_session_caps(session, 0); | |
2730 | break; | |
2731 | ||
fcff415c YZ |
2732 | case CEPH_SESSION_REJECT: |
2733 | WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); | |
2734 | pr_info("mds%d rejected session\n", session->s_mds); | |
2735 | session->s_state = CEPH_MDS_SESSION_REJECTED; | |
2736 | cleanup_session_requests(mdsc, session); | |
2737 | remove_session_caps(session); | |
2738 | wake = 2; /* for good measure */ | |
2739 | break; | |
2740 | ||
2f2dc053 SW |
2741 | default: |
2742 | pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); | |
2743 | WARN_ON(1); | |
2744 | } | |
2745 | ||
2746 | mutex_unlock(&session->s_mutex); | |
2747 | if (wake) { | |
2748 | mutex_lock(&mdsc->mutex); | |
2749 | __wake_requests(mdsc, &session->s_waiting); | |
656e4382 YZ |
2750 | if (wake == 2) |
2751 | kick_requests(mdsc, mds); | |
2f2dc053 SW |
2752 | mutex_unlock(&mdsc->mutex); |
2753 | } | |
0a07fc8c YZ |
2754 | if (op == CEPH_SESSION_CLOSE) |
2755 | ceph_put_mds_session(session); | |
2f2dc053 SW |
2756 | return; |
2757 | ||
2758 | bad: | |
2759 | pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, | |
2760 | (int)msg->front.iov_len); | |
9ec7cab1 | 2761 | ceph_msg_dump(msg); |
2f2dc053 SW |
2762 | return; |
2763 | } | |
2764 | ||
2765 | ||
2766 | /* | |
2767 | * called under session->mutex. | |
2768 | */ | |
2769 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, | |
2770 | struct ceph_mds_session *session) | |
2771 | { | |
2772 | struct ceph_mds_request *req, *nreq; | |
3de22be6 | 2773 | struct rb_node *p; |
2f2dc053 SW |
2774 | int err; |
2775 | ||
2776 | dout("replay_unsafe_requests mds%d\n", session->s_mds); | |
2777 | ||
2778 | mutex_lock(&mdsc->mutex); | |
2779 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { | |
6e6f0923 | 2780 | err = __prepare_send_request(mdsc, req, session->s_mds, true); |
2f2dc053 SW |
2781 | if (!err) { |
2782 | ceph_msg_get(req->r_request); | |
2783 | ceph_con_send(&session->s_con, req->r_request); | |
2784 | } | |
2785 | } | |
3de22be6 YZ |
2786 | |
2787 | /* | |
2788 | * Also re-send old requests when the MDS enters the reconnect stage, |
2789 | * so that the MDS can process completed requests in the clientreplay stage. |
2790 | */ | |
2791 | p = rb_first(&mdsc->request_tree); | |
2792 | while (p) { | |
2793 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
2794 | p = rb_next(p); | |
bc2de10d | 2795 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
3de22be6 YZ |
2796 | continue; |
2797 | if (req->r_attempts == 0) | |
2798 | continue; /* only old requests */ | |
2799 | if (req->r_session && | |
2800 | req->r_session->s_mds == session->s_mds) { | |
6e6f0923 YZ |
2801 | err = __prepare_send_request(mdsc, req, |
2802 | session->s_mds, true); | |
3de22be6 YZ |
2803 | if (!err) { |
2804 | ceph_msg_get(req->r_request); | |
2805 | ceph_con_send(&session->s_con, req->r_request); | |
2806 | } | |
2807 | } | |
2808 | } | |
2f2dc053 SW |
2809 | mutex_unlock(&mdsc->mutex); |
2810 | } | |
2811 | ||
2812 | /* | |
2813 | * Encode information about a cap for a reconnect with the MDS. | |
2814 | */ | |
2f2dc053 SW |
2815 | static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, |
2816 | void *arg) | |
2817 | { | |
20cb34ae SW |
2818 | union { |
2819 | struct ceph_mds_cap_reconnect v2; | |
2820 | struct ceph_mds_cap_reconnect_v1 v1; | |
2821 | } rec; | |
2f2dc053 | 2822 | struct ceph_inode_info *ci; |
20cb34ae SW |
2823 | struct ceph_reconnect_state *recon_state = arg; |
2824 | struct ceph_pagelist *pagelist = recon_state->pagelist; | |
2f2dc053 SW |
2825 | char *path; |
2826 | int pathlen, err; | |
2827 | u64 pathbase; | |
3469ed0d | 2828 | u64 snap_follows; |
2f2dc053 SW |
2829 | struct dentry *dentry; |
2830 | ||
2831 | ci = cap->ci; | |
2832 | ||
2833 | dout(" adding %p ino %llx.%llx cap %p %lld %s\n", | |
2834 | inode, ceph_vinop(inode), cap, cap->cap_id, | |
2835 | ceph_cap_string(cap->issued)); | |
93cea5be SW |
2836 | err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); |
2837 | if (err) | |
2838 | return err; | |
2f2dc053 SW |
2839 | |
2840 | dentry = d_find_alias(inode); | |
2841 | if (dentry) { | |
2842 | path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); | |
2843 | if (IS_ERR(path)) { | |
2844 | err = PTR_ERR(path); | |
e072f8aa | 2845 | goto out_dput; |
2f2dc053 SW |
2846 | } |
2847 | } else { | |
2848 | path = NULL; | |
2849 | pathlen = 0; | |
4eacd4cb | 2850 | pathbase = 0; |
2f2dc053 | 2851 | } |
2f2dc053 | 2852 | |
be655596 | 2853 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
2854 | cap->seq = 0; /* reset cap seq */ |
2855 | cap->issue_seq = 0; /* and issue_seq */ | |
667ca05c | 2856 | cap->mseq = 0; /* and migrate_seq */ |
99a9c273 | 2857 | cap->cap_gen = cap->session->s_cap_gen; |
20cb34ae | 2858 | |
121f22a1 | 2859 | if (recon_state->msg_version >= 2) { |
20cb34ae SW |
2860 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); |
2861 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2862 | rec.v2.issued = cpu_to_le32(cap->issued); | |
2863 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2864 | rec.v2.pathbase = cpu_to_le64(pathbase); | |
2865 | rec.v2.flock_len = 0; | |
20cb34ae SW |
2866 | } else { |
2867 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); | |
2868 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2869 | rec.v1.issued = cpu_to_le32(cap->issued); | |
2870 | rec.v1.size = cpu_to_le64(inode->i_size); | |
2871 | ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); | |
2872 | ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); | |
2873 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2874 | rec.v1.pathbase = cpu_to_le64(pathbase); | |
20cb34ae | 2875 | } |
3469ed0d YZ |
2876 | |
2877 | if (list_empty(&ci->i_cap_snaps)) { | |
2878 | snap_follows = 0; | |
2879 | } else { | |
2880 | struct ceph_cap_snap *capsnap = | |
2881 | list_first_entry(&ci->i_cap_snaps, | |
2882 | struct ceph_cap_snap, ci_item); | |
2883 | snap_follows = capsnap->follows; | |
20cb34ae | 2884 | } |
be655596 | 2885 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 | 2886 | |
121f22a1 | 2887 | if (recon_state->msg_version >= 2) { |
40819f6f | 2888 | int num_fcntl_locks, num_flock_locks; |
39be95e9 | 2889 | struct ceph_filelock *flocks; |
121f22a1 YZ |
2890 | size_t struct_len, total_len = 0; |
2891 | u8 struct_v = 0; | |
39be95e9 JS |
2892 | |
2893 | encode_again: | |
39be95e9 | 2894 | ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); |
39be95e9 JS |
2895 | flocks = kmalloc((num_fcntl_locks+num_flock_locks) * |
2896 | sizeof(struct ceph_filelock), GFP_NOFS); | |
2897 | if (!flocks) { | |
2898 | err = -ENOMEM; | |
2899 | goto out_free; | |
2900 | } | |
39be95e9 JS |
2901 | err = ceph_encode_locks_to_buffer(inode, flocks, |
2902 | num_fcntl_locks, | |
2903 | num_flock_locks); | |
39be95e9 JS |
2904 | if (err) { |
2905 | kfree(flocks); | |
2906 | if (err == -ENOSPC) | |
2907 | goto encode_again; | |
2908 | goto out_free; | |
2909 | } | |
121f22a1 YZ |
2910 | |
2911 | if (recon_state->msg_version >= 3) { | |
2912 | /* version, compat_version and struct_len */ | |
2913 | total_len = 2 * sizeof(u8) + sizeof(u32); | |
3469ed0d | 2914 | struct_v = 2; |
121f22a1 | 2915 | } |
39be95e9 JS |
2916 | /* |
2917 | * the number of encoded locks is now stable, so copy to the pagelist |
2918 | */ | |
121f22a1 YZ |
2919 | struct_len = 2 * sizeof(u32) + |
2920 | (num_fcntl_locks + num_flock_locks) * | |
2921 | sizeof(struct ceph_filelock); | |
2922 | rec.v2.flock_len = cpu_to_le32(struct_len); | |
2923 | ||
2924 | struct_len += sizeof(rec.v2); | |
2925 | struct_len += sizeof(u32) + pathlen; | |
2926 | ||
3469ed0d YZ |
2927 | if (struct_v >= 2) |
2928 | struct_len += sizeof(u64); /* snap_follows */ | |
2929 | ||
121f22a1 YZ |
2930 | total_len += struct_len; |
2931 | err = ceph_pagelist_reserve(pagelist, total_len); | |
2932 | ||
2933 | if (!err) { | |
2934 | if (recon_state->msg_version >= 3) { | |
2935 | ceph_pagelist_encode_8(pagelist, struct_v); | |
2936 | ceph_pagelist_encode_8(pagelist, 1); | |
2937 | ceph_pagelist_encode_32(pagelist, struct_len); | |
2938 | } | |
2939 | ceph_pagelist_encode_string(pagelist, path, pathlen); | |
2940 | ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); | |
2941 | ceph_locks_to_pagelist(flocks, pagelist, | |
2942 | num_fcntl_locks, | |
2943 | num_flock_locks); | |
3469ed0d YZ |
2944 | if (struct_v >= 2) |
2945 | ceph_pagelist_encode_64(pagelist, snap_follows); | |
121f22a1 | 2946 | } |
39be95e9 | 2947 | kfree(flocks); |
3612abbd | 2948 | } else { |
121f22a1 YZ |
2949 | size_t size = sizeof(u32) + pathlen + sizeof(rec.v1); |
2950 | err = ceph_pagelist_reserve(pagelist, size); | |
2951 | if (!err) { | |
2952 | ceph_pagelist_encode_string(pagelist, path, pathlen); | |
2953 | ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); | |
2954 | } | |
40819f6f | 2955 | } |
44c99757 YZ |
2956 | |
2957 | recon_state->nr_caps++; | |
e072f8aa | 2958 | out_free: |
2f2dc053 | 2959 | kfree(path); |
e072f8aa | 2960 | out_dput: |
2f2dc053 | 2961 | dput(dentry); |
93cea5be | 2962 | return err; |
2f2dc053 SW |
2963 | } |
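
/*
 * For orientation, the per-cap record emitted above lays out roughly as
 * follows (a sketch inferred from the pagelist calls, not an authoritative
 * wire specification):
 *
 *   msg_version 1:  string path, struct rec.v1
 *   msg_version 2:  string path, struct rec.v2,
 *                   u32 nr_fcntl, fcntl locks, u32 nr_flock, flock locks
 *   msg_version 3:  u8 struct_v (2), u8 compat (1), u32 struct_len,
 *                   the v2 payload above, u64 snap_follows
 *
 * Note that rec.v2.flock_len covers only the lock blob (the two counters
 * plus the ceph_filelock array), which is why it is computed before rec.v2
 * itself is appended.
 */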


/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about; that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
                               struct ceph_mds_session *session)
{
        struct ceph_msg *reply;
        struct rb_node *p;
        int mds = session->s_mds;
        int err = -ENOMEM;
        int s_nr_caps;
        struct ceph_pagelist *pagelist;
        struct ceph_reconnect_state recon_state;

        pr_info("mds%d reconnect start\n", mds);

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        if (!pagelist)
                goto fail_nopagelist;
        ceph_pagelist_init(pagelist);

        reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
        if (!reply)
                goto fail_nomsg;

        mutex_lock(&session->s_mutex);
        session->s_state = CEPH_MDS_SESSION_RECONNECTING;
        session->s_seq = 0;

        dout("session %p state %s\n", session,
             ceph_session_state_name(session->s_state));

        spin_lock(&session->s_gen_ttl_lock);
        session->s_cap_gen++;
        spin_unlock(&session->s_gen_ttl_lock);

        spin_lock(&session->s_cap_lock);
        /* don't know if session is readonly */
        session->s_readonly = 0;
        /*
         * notify __ceph_remove_cap() that we are composing cap reconnect.
         * If a cap gets released before being added to the cap reconnect,
         * __ceph_remove_cap() should skip queuing cap release.
         */
        session->s_cap_reconnect = 1;
        /* drop old cap expires; we're about to reestablish that state */
        cleanup_cap_releases(mdsc, session);

        /* trim unused caps to reduce MDS's cache rejoin time */
        if (mdsc->fsc->sb->s_root)
                shrink_dcache_parent(mdsc->fsc->sb->s_root);

        ceph_con_close(&session->s_con);
        ceph_con_open(&session->s_con,
                      CEPH_ENTITY_TYPE_MDS, mds,
                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

        /* replay unsafe requests */
        replay_unsafe_requests(mdsc, session);

        down_read(&mdsc->snap_rwsem);

        /* traverse this session's caps */
        s_nr_caps = session->s_nr_caps;
        err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
        if (err)
                goto fail;

        recon_state.nr_caps = 0;
        recon_state.pagelist = pagelist;
        if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
                recon_state.msg_version = 3;
        else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
                recon_state.msg_version = 2;
        else
                recon_state.msg_version = 1;
        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
        if (err < 0)
                goto fail;

        spin_lock(&session->s_cap_lock);
        session->s_cap_reconnect = 0;
        spin_unlock(&session->s_cap_lock);

        /*
         * snaprealms.  we provide mds with the ino, seq (version), and
         * parent for all of our realms.  If the mds has any newer info,
         * it will tell us.
         */
        for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
                struct ceph_snap_realm *realm =
                        rb_entry(p, struct ceph_snap_realm, node);
                struct ceph_mds_snaprealm_reconnect sr_rec;

                dout(" adding snap realm %llx seq %lld parent %llx\n",
                     realm->ino, realm->seq, realm->parent_ino);
                sr_rec.ino = cpu_to_le64(realm->ino);
                sr_rec.seq = cpu_to_le64(realm->seq);
                sr_rec.parent = cpu_to_le64(realm->parent_ino);
                err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
                if (err)
                        goto fail;
        }

        reply->hdr.version = cpu_to_le16(recon_state.msg_version);

        /* raced with cap release? */
        if (s_nr_caps != recon_state.nr_caps) {
                struct page *page = list_first_entry(&pagelist->head,
                                                     struct page, lru);
                __le32 *addr = kmap_atomic(page);
                *addr = cpu_to_le32(recon_state.nr_caps);
                kunmap_atomic(addr);
        }

        reply->hdr.data_len = cpu_to_le32(pagelist->length);
        ceph_msg_data_add_pagelist(reply, pagelist);

        ceph_early_kick_flushing_caps(mdsc, session);

        ceph_con_send(&session->s_con, reply);

        mutex_unlock(&session->s_mutex);

        mutex_lock(&mdsc->mutex);
        __wake_requests(mdsc, &session->s_waiting);
        mutex_unlock(&mdsc->mutex);

        up_read(&mdsc->snap_rwsem);
        return;

fail:
        ceph_msg_put(reply);
        up_read(&mdsc->snap_rwsem);
        mutex_unlock(&session->s_mutex);
fail_nomsg:
        ceph_pagelist_release(pagelist);
fail_nopagelist:
        pr_err("error %d preparing reconnect for mds%d\n", err, mds);
        return;
}
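
/*
 * For reference, the CLIENT_RECONNECT body assembled above looks roughly
 * like this (a sketch of the encoding order, not a formal spec):
 *
 *   u32 nr_caps                (patched in place if a cap release raced)
 *   nr_caps cap records        (see encode_caps_cb above)
 *   one ceph_mds_snaprealm_reconnect { ino, seq, parent } per snap realm
 *
 * The pagelist rides along as message data, and hdr.version tells the MDS
 * which cap-record encoding (1, 2 or 3) to expect.
 */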


/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
                          struct ceph_mdsmap *newmap,
                          struct ceph_mdsmap *oldmap)
{
        int i;
        int oldstate, newstate;
        struct ceph_mds_session *s;

        dout("check_new_map new %u old %u\n",
             newmap->m_epoch, oldmap->m_epoch);

        for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i] == NULL)
                        continue;
                s = mdsc->sessions[i];
                oldstate = ceph_mdsmap_get_state(oldmap, i);
                newstate = ceph_mdsmap_get_state(newmap, i);

                dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
                     i, ceph_mds_state_name(oldstate),
                     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
                     ceph_mds_state_name(newstate),
                     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
                     ceph_session_state_name(s->s_state));

                if (i >= newmap->m_num_mds ||
                    memcmp(ceph_mdsmap_get_addr(oldmap, i),
                           ceph_mdsmap_get_addr(newmap, i),
                           sizeof(struct ceph_entity_addr))) {
                        if (s->s_state == CEPH_MDS_SESSION_OPENING) {
                                /* the session never opened, just close it
                                 * out now */
                                get_session(s);
                                __unregister_session(mdsc, s);
                                __wake_requests(mdsc, &s->s_waiting);
                                ceph_put_mds_session(s);
                        } else if (i >= newmap->m_num_mds) {
                                /* force close session for stopped mds */
                                get_session(s);
                                __unregister_session(mdsc, s);
                                __wake_requests(mdsc, &s->s_waiting);
                                kick_requests(mdsc, i);
                                mutex_unlock(&mdsc->mutex);

                                mutex_lock(&s->s_mutex);
                                cleanup_session_requests(mdsc, s);
                                remove_session_caps(s);
                                mutex_unlock(&s->s_mutex);

                                ceph_put_mds_session(s);

                                mutex_lock(&mdsc->mutex);
                        } else {
                                /* just close it */
                                mutex_unlock(&mdsc->mutex);
                                mutex_lock(&s->s_mutex);
                                mutex_lock(&mdsc->mutex);
                                ceph_con_close(&s->s_con);
                                mutex_unlock(&s->s_mutex);
                                s->s_state = CEPH_MDS_SESSION_RESTARTING;
                        }
                } else if (oldstate == newstate) {
                        continue;  /* nothing new with this mds */
                }

                /*
                 * send reconnect?
                 */
                if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
                    newstate >= CEPH_MDS_STATE_RECONNECT) {
                        mutex_unlock(&mdsc->mutex);
                        send_mds_reconnect(mdsc, s);
                        mutex_lock(&mdsc->mutex);
                }

                /*
                 * kick requests on any mds that has gone active.
                 */
                if (oldstate < CEPH_MDS_STATE_ACTIVE &&
                    newstate >= CEPH_MDS_STATE_ACTIVE) {
                        if (oldstate != CEPH_MDS_STATE_CREATING &&
                            oldstate != CEPH_MDS_STATE_STARTING)
                                pr_info("mds%d recovery completed\n", s->s_mds);
                        kick_requests(mdsc, i);
                        ceph_kick_flushing_caps(mdsc, s);
                        wake_up_session_caps(s, 1);
                }
        }

        for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
                s = mdsc->sessions[i];
                if (!s)
                        continue;
                if (!ceph_mdsmap_is_laggy(newmap, i))
                        continue;
                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
                    s->s_state == CEPH_MDS_SESSION_HUNG ||
                    s->s_state == CEPH_MDS_SESSION_CLOSING) {
                        dout(" connecting to export targets of laggy mds%d\n",
                             i);
                        __open_export_target_sessions(mdsc, s);
                }
        }
}
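
/*
 * A typical failover, walked through the logic above (states are
 * illustrative): mds0 restarts, so its address in the new map differs
 * while it works through replay.  The address mismatch closes our
 * connection and marks the session RESTARTING; once a later map shows
 * mds0 at CEPH_MDS_STATE_RECONNECT or beyond, send_mds_reconnect() runs,
 * and when mds0 finally reaches CEPH_MDS_STATE_ACTIVE we kick the stalled
 * requests and flushing caps and wake waiters on that session.
 */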


/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);

        ceph_put_mds_session(di->lease_session);
        di->lease_session = NULL;
}

static void handle_lease(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session,
                         struct ceph_msg *msg)
{
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
        struct dentry *parent, *dentry;
        struct ceph_dentry_info *di;
        int mds = session->s_mds;
        struct ceph_mds_lease *h = msg->front.iov_base;
        u32 seq;
        struct ceph_vino vino;
        struct qstr dname;
        int release = 0;

        dout("handle_lease from mds%d\n", mds);

        /* decode */
        if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
                goto bad;
        vino.ino = le64_to_cpu(h->ino);
        vino.snap = CEPH_NOSNAP;
        seq = le32_to_cpu(h->seq);
        dname.name = (void *)h + sizeof(*h) + sizeof(u32);
        dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
        if (dname.len != get_unaligned_le32(h+1))
                goto bad;

        /* lookup inode */
        inode = ceph_find_inode(sb, vino);
        dout("handle_lease %s, ino %llx %p %.*s\n",
             ceph_lease_op_name(h->action), vino.ino, inode,
             dname.len, dname.name);

        mutex_lock(&session->s_mutex);
        session->s_seq++;

        if (inode == NULL) {
                dout("handle_lease no inode %llx\n", vino.ino);
                goto release;
        }

        /* dentry */
        parent = d_find_alias(inode);
        if (!parent) {
                dout("no parent dentry on inode %p\n", inode);
                WARN_ON(1);
                goto release;  /* hrm... */
        }
        dname.hash = full_name_hash(parent, dname.name, dname.len);
        dentry = d_lookup(parent, &dname);
        dput(parent);
        if (!dentry)
                goto release;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        switch (h->action) {
        case CEPH_MDS_LEASE_REVOKE:
                if (di->lease_session == session) {
                        if (ceph_seq_cmp(di->lease_seq, seq) > 0)
                                h->seq = cpu_to_le32(di->lease_seq);
                        __ceph_mdsc_drop_dentry_lease(dentry);
                }
                release = 1;
                break;

        case CEPH_MDS_LEASE_RENEW:
                if (di->lease_session == session &&
                    di->lease_gen == session->s_cap_gen &&
                    di->lease_renew_from &&
                    di->lease_renew_after == 0) {
                        unsigned long duration =
                                msecs_to_jiffies(le32_to_cpu(h->duration_ms));

                        di->lease_seq = seq;
                        di->time = di->lease_renew_from + duration;
                        di->lease_renew_after = di->lease_renew_from +
                                (duration >> 1);
                        di->lease_renew_from = 0;
                }
                break;
        }
        spin_unlock(&dentry->d_lock);
        dput(dentry);

        if (!release)
                goto out;

release:
        /* let's just reuse the same message */
        h->action = CEPH_MDS_LEASE_REVOKE_ACK;
        ceph_msg_get(msg);
        ceph_con_send(&session->s_con, msg);

out:
        iput(inode);
        mutex_unlock(&session->s_mutex);
        return;

bad:
        pr_err("corrupt lease message\n");
        ceph_msg_dump(msg);
}
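
/*
 * Lease message framing, as decoded and built here (sketch; the exact
 * field list lives in struct ceph_mds_lease):
 *
 *   struct ceph_mds_lease h;     action, ino, seq, duration_ms, ...
 *   u32 dname_len;
 *   char dname[dname_len];       not NUL-terminated
 *
 * The dentry name simply follows the fixed header, which is why the
 * handler above checks msg->front.iov_len against sizeof(*h) + sizeof(u32).
 */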

void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                              struct inode *inode,
                              struct dentry *dentry, char action,
                              u32 seq)
{
        struct ceph_msg *msg;
        struct ceph_mds_lease *lease;
        int len = sizeof(*lease) + sizeof(u32);
        int dnamelen = 0;

        dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
             inode, dentry, ceph_lease_op_name(action), session->s_mds);
        dnamelen = dentry->d_name.len;
        len += dnamelen;

        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
        if (!msg)
                return;
        lease = msg->front.iov_base;
        lease->action = action;
        lease->ino = cpu_to_le64(ceph_vino(inode).ino);
        lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
        lease->seq = cpu_to_le32(seq);
        put_unaligned_le32(dnamelen, lease + 1);
        memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

        /*
         * if this is a preemptive lease RELEASE, no need to
         * flush request stream, since the actual request will
         * soon follow.
         */
        msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

        ceph_con_send(&session->s_con, msg);
}

/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
        int i;

        dout("drop_leases\n");
        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
                if (!s)
                        continue;
                mutex_unlock(&mdsc->mutex);
                mutex_lock(&s->s_mutex);
                mutex_unlock(&s->s_mutex);
                ceph_put_mds_session(s);
                mutex_lock(&mdsc->mutex);
        }
        mutex_unlock(&mdsc->mutex);
}



/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
        int delay = 5;
        unsigned hz = round_jiffies_relative(HZ * delay);
        schedule_delayed_work(&mdsc->delayed_work, hz);
}

static void delayed_work(struct work_struct *work)
{
        int i;
        struct ceph_mds_client *mdsc =
                container_of(work, struct ceph_mds_client, delayed_work.work);
        int renew_interval;
        int renew_caps;

        dout("mdsc delayed_work\n");
        ceph_check_delayed_caps(mdsc);

        mutex_lock(&mdsc->mutex);
        renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
        renew_caps = time_after_eq(jiffies, HZ*renew_interval +
                                   mdsc->last_renew_caps);
        if (renew_caps)
                mdsc->last_renew_caps = jiffies;

        for (i = 0; i < mdsc->max_sessions; i++) {
                struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
                if (s == NULL)
                        continue;
                if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
                        dout("resending session close request for mds%d\n",
                             s->s_mds);
                        request_close_session(mdsc, s);
                        ceph_put_mds_session(s);
                        continue;
                }
                if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
                        if (s->s_state == CEPH_MDS_SESSION_OPEN) {
                                s->s_state = CEPH_MDS_SESSION_HUNG;
                                pr_info("mds%d hung\n", s->s_mds);
                        }
                }
                if (s->s_state < CEPH_MDS_SESSION_OPEN) {
                        /* this mds is failed or recovering, just wait */
                        ceph_put_mds_session(s);
                        continue;
                }
                mutex_unlock(&mdsc->mutex);

                mutex_lock(&s->s_mutex);
                if (renew_caps)
                        send_renew_caps(mdsc, s);
                else
                        ceph_con_keepalive(&s->s_con);
                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
                    s->s_state == CEPH_MDS_SESSION_HUNG)
                        ceph_send_cap_releases(mdsc, s);
                mutex_unlock(&s->s_mutex);
                ceph_put_mds_session(s);

                mutex_lock(&mdsc->mutex);
        }
        mutex_unlock(&mdsc->mutex);

        schedule_delayed(mdsc);
}
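
/*
 * Worked example of the renewal cadence (numbers illustrative; 60s is the
 * usual MDS session timeout default): renew_interval = 60 >> 2 = 15s,
 * while delayed_work ticks every ~5s, so roughly every third tick sends a
 * RENEWCAPS session message and the ticks in between fall back to a bare
 * connection keepalive.
 */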

int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
        struct ceph_mds_client *mdsc;

        mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
        if (!mdsc)
                return -ENOMEM;
        mdsc->fsc = fsc;
        fsc->mdsc = mdsc;
        mutex_init(&mdsc->mutex);
        mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
        if (mdsc->mdsmap == NULL) {
                kfree(mdsc);
                return -ENOMEM;
        }

        init_completion(&mdsc->safe_umount_waiters);
        init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
        mdsc->sessions = NULL;
        atomic_set(&mdsc->num_sessions, 0);
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
        mdsc->last_snap_seq = 0;
        init_rwsem(&mdsc->snap_rwsem);
        mdsc->snap_realms = RB_ROOT;
        INIT_LIST_HEAD(&mdsc->snap_empty);
        spin_lock_init(&mdsc->snap_empty_lock);
        mdsc->last_tid = 0;
        mdsc->oldest_tid = 0;
        mdsc->request_tree = RB_ROOT;
        INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
        mdsc->last_renew_caps = jiffies;
        INIT_LIST_HEAD(&mdsc->cap_delay_list);
        spin_lock_init(&mdsc->cap_delay_lock);
        INIT_LIST_HEAD(&mdsc->snap_flush_list);
        spin_lock_init(&mdsc->snap_flush_lock);
        mdsc->last_cap_flush_tid = 1;
        INIT_LIST_HEAD(&mdsc->cap_flush_list);
        INIT_LIST_HEAD(&mdsc->cap_dirty);
        INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
        mdsc->num_cap_flushing = 0;
        spin_lock_init(&mdsc->cap_dirty_lock);
        init_waitqueue_head(&mdsc->cap_flushing_wq);
        spin_lock_init(&mdsc->dentry_lru_lock);
        INIT_LIST_HEAD(&mdsc->dentry_lru);

        ceph_caps_init(mdsc);
        ceph_adjust_min_caps(mdsc, fsc->min_caps);

        init_rwsem(&mdsc->pool_perm_rwsem);
        mdsc->pool_perm_tree = RB_ROOT;

        return 0;
}

/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
        struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_request *req;

        mutex_lock(&mdsc->mutex);
        if (__get_oldest_req(mdsc)) {
                mutex_unlock(&mdsc->mutex);

                dout("wait_requests waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
                                ceph_timeout_jiffies(opts->mount_timeout));

                /* tear down remaining requests */
                mutex_lock(&mdsc->mutex);
                while ((req = __get_oldest_req(mdsc))) {
                        dout("wait_requests timed out on tid %llu\n",
                             req->r_tid);
                        __unregister_request(mdsc, req);
                }
        }
        mutex_unlock(&mdsc->mutex);
        dout("wait_requests done\n");
}

/*
 * called before the mount goes read-only, and before dentries are torn
 * down.  (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
        dout("pre_umount\n");
        mdsc->stopping = 1;

        drop_leases(mdsc);
        ceph_flush_dirty_caps(mdsc);
        wait_requests(mdsc);

        /*
         * wait for reply handlers to drop their request refs and
         * their inode/dcache refs
         */
        ceph_msgr_flush();
}

/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
        struct ceph_mds_request *req = NULL, *nextreq;
        struct rb_node *n;

        mutex_lock(&mdsc->mutex);
        dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
        req = __get_oldest_req(mdsc);
        while (req && req->r_tid <= want_tid) {
                /* find next request */
                n = rb_next(&req->r_node);
                if (n)
                        nextreq = rb_entry(n, struct ceph_mds_request, r_node);
                else
                        nextreq = NULL;
                if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
                    (req->r_op & CEPH_MDS_OP_WRITE)) {
                        /* write op */
                        ceph_mdsc_get_request(req);
                        if (nextreq)
                                ceph_mdsc_get_request(nextreq);
                        mutex_unlock(&mdsc->mutex);
                        dout("wait_unsafe_requests wait on %llu (want %llu)\n",
                             req->r_tid, want_tid);
                        wait_for_completion(&req->r_safe_completion);
                        mutex_lock(&mdsc->mutex);
                        ceph_mdsc_put_request(req);
                        if (!nextreq)
                                break;  /* no next existed, so we're done */
                        if (RB_EMPTY_NODE(&nextreq->r_node)) {
                                /* next request was removed from tree */
                                ceph_mdsc_put_request(nextreq);
                                goto restart;
                        }
                        ceph_mdsc_put_request(nextreq);  /* won't go away */
                }
                req = nextreq;
        }
        mutex_unlock(&mdsc->mutex);
        dout("wait_unsafe_requests done\n");
}

void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
        u64 want_tid, want_flush;

        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
                return;

        dout("sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
        mutex_unlock(&mdsc->mutex);

        ceph_flush_dirty_caps(mdsc);
        spin_lock(&mdsc->cap_dirty_lock);
        want_flush = mdsc->last_cap_flush_tid;
        if (!list_empty(&mdsc->cap_flush_list)) {
                struct ceph_cap_flush *cf =
                        list_last_entry(&mdsc->cap_flush_list,
                                        struct ceph_cap_flush, g_list);
                cf->wake = true;
        }
        spin_unlock(&mdsc->cap_dirty_lock);

        dout("sync want tid %lld flush_seq %lld\n",
             want_tid, want_flush);

        wait_unsafe_requests(mdsc, want_tid);
        wait_caps_flush(mdsc, want_flush);
}
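
/*
 * Sync ordering in a nutshell (tids illustrative): if last_tid is 42 and
 * the newest cap flush tid is 17 when sync starts, we first wait for every
 * write-class MDS request with tid <= 42 to receive its *safe* reply
 * (journaled by the MDS, not merely applied), then wait for cap flush 17
 * to be acked.  Requests and flushes issued after sync begins are
 * deliberately not waited for.
 */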

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
                return true;
        return atomic_read(&mdsc->num_sessions) <= skipped;
}

/*
 * called after the sb is read-only.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
        struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_session *session;
        int i;
        int skipped = 0;

        dout("close_sessions\n");

        /* close sessions */
        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                session = __ceph_lookup_mds_session(mdsc, i);
                if (!session)
                        continue;
                mutex_unlock(&mdsc->mutex);
                mutex_lock(&session->s_mutex);
                if (__close_session(mdsc, session) <= 0)
                        skipped++;
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
                mutex_lock(&mdsc->mutex);
        }
        mutex_unlock(&mdsc->mutex);

        dout("waiting for sessions to close\n");
        wait_event_timeout(mdsc->session_close_wq,
                           done_closing_sessions(mdsc, skipped),
                           ceph_timeout_jiffies(opts->mount_timeout));

        /* tear down remaining sessions */
        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                if (mdsc->sessions[i]) {
                        session = get_session(mdsc->sessions[i]);
                        __unregister_session(mdsc, session);
                        mutex_unlock(&mdsc->mutex);
                        mutex_lock(&session->s_mutex);
                        remove_session_caps(session);
                        mutex_unlock(&session->s_mutex);
                        ceph_put_mds_session(session);
                        mutex_lock(&mdsc->mutex);
                }
        }
        WARN_ON(!list_empty(&mdsc->cap_delay_list));
        mutex_unlock(&mdsc->mutex);

        ceph_cleanup_empty_realms(mdsc);

        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

        dout("stopped\n");
}

void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
        struct ceph_mds_session *session;
        int mds;

        dout("force umount\n");

        mutex_lock(&mdsc->mutex);
        for (mds = 0; mds < mdsc->max_sessions; mds++) {
                session = __ceph_lookup_mds_session(mdsc, mds);
                if (!session)
                        continue;
                mutex_unlock(&mdsc->mutex);
                mutex_lock(&session->s_mutex);
                __close_session(mdsc, session);
                if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
                        cleanup_session_requests(mdsc, session);
                        remove_session_caps(session);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
                mutex_lock(&mdsc->mutex);
                kick_requests(mdsc, mds);
        }
        __wake_requests(mdsc, &mdsc->waiting_for_map);
        mutex_unlock(&mdsc->mutex);
}

static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
        dout("stop\n");
        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
        if (mdsc->mdsmap)
                ceph_mdsmap_destroy(mdsc->mdsmap);
        kfree(mdsc->sessions);
        ceph_caps_finalize(mdsc);
        ceph_pool_perm_destroy(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;
        dout("mdsc_destroy %p\n", mdsc);

        /* flush out any connection work with references to us */
        ceph_msgr_flush();

        ceph_mdsc_stop(mdsc);

        fsc->mdsc = NULL;
        kfree(mdsc);
        dout("mdsc_destroy %p done\n", mdsc);
}

void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
        struct ceph_fs_client *fsc = mdsc->fsc;
        const char *mds_namespace = fsc->mount_options->mds_namespace;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
        u32 epoch;
        u32 map_len;
        u32 num_fs;
        u32 mount_fscid = (u32)-1;
        u8 struct_v, struct_cv;
        int err = -EINVAL;

        ceph_decode_need(&p, end, sizeof(u32), bad);
        epoch = ceph_decode_32(&p);

        dout("handle_fsmap epoch %u\n", epoch);

        ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
        struct_v = ceph_decode_8(&p);
        struct_cv = ceph_decode_8(&p);
        map_len = ceph_decode_32(&p);

        ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
        p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */

        num_fs = ceph_decode_32(&p);
        while (num_fs-- > 0) {
                void *info_p, *info_end;
                u32 info_len;
                u8 info_v, info_cv;
                u32 fscid, namelen;

                ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
                info_v = ceph_decode_8(&p);
                info_cv = ceph_decode_8(&p);
                info_len = ceph_decode_32(&p);
                ceph_decode_need(&p, end, info_len, bad);
                info_p = p;
                info_end = p + info_len;
                p = info_end;

                ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
                fscid = ceph_decode_32(&info_p);
                namelen = ceph_decode_32(&info_p);
                ceph_decode_need(&info_p, info_end, namelen, bad);

                if (mds_namespace &&
                    strlen(mds_namespace) == namelen &&
                    !strncmp(mds_namespace, (char *)info_p, namelen)) {
                        mount_fscid = fscid;
                        break;
                }
        }

        ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
        if (mount_fscid != (u32)-1) {
                fsc->client->monc.fs_cluster_id = mount_fscid;
                ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
                                   0, true);
                ceph_monc_renew_subs(&fsc->client->monc);
        } else {
                err = -ENOENT;
                goto err_out;
        }
        return;
bad:
        pr_err("error decoding fsmap\n");
err_out:
        mutex_lock(&mdsc->mutex);
        mdsc->mdsmap_err = err;
        __wake_requests(mdsc, &mdsc->waiting_for_map);
        mutex_unlock(&mdsc->mutex);
        return;
}
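
/*
 * Shape of the FSMap payload decoded above (a sketch that follows the
 * decode calls; the full map carries more fields, which this client
 * skips over via info_len):
 *
 *   u32 epoch
 *   u8 struct_v, u8 struct_cv, u32 map_len
 *   u32 epoch (again), u32 legacy_client_fscid    <- skipped
 *   u32 num_fs
 *   per filesystem: u8 info_v, u8 info_cv, u32 info_len,
 *                   u32 fscid, u32 namelen, char name[namelen], ...
 *
 * Only fscid and name are examined, to map the mds_namespace mount option
 * onto a filesystem cluster id.
 */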

/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
        u32 epoch;
        u32 maplen;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
        struct ceph_mdsmap *newmap, *oldmap;
        struct ceph_fsid fsid;
        int err = -EINVAL;

        ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
        ceph_decode_copy(&p, &fsid, sizeof(fsid));
        if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
                return;
        epoch = ceph_decode_32(&p);
        maplen = ceph_decode_32(&p);
        dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

        /* do we need it? */
        mutex_lock(&mdsc->mutex);
        if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
                dout("handle_map epoch %u <= our %u\n",
                     epoch, mdsc->mdsmap->m_epoch);
                mutex_unlock(&mdsc->mutex);
                return;
        }

        newmap = ceph_mdsmap_decode(&p, end);
        if (IS_ERR(newmap)) {
                err = PTR_ERR(newmap);
                goto bad_unlock;
        }

        /* swap into place */
        if (mdsc->mdsmap) {
                oldmap = mdsc->mdsmap;
                mdsc->mdsmap = newmap;
                check_new_map(mdsc, newmap, oldmap);
                ceph_mdsmap_destroy(oldmap);
        } else {
                mdsc->mdsmap = newmap;  /* first mds map */
        }
        mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

        __wake_requests(mdsc, &mdsc->waiting_for_map);
        ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
                          mdsc->mdsmap->m_epoch);

        mutex_unlock(&mdsc->mutex);
        schedule_delayed(mdsc);
        return;

bad_unlock:
        mutex_unlock(&mdsc->mutex);
bad:
        pr_err("error decoding mdsmap %d\n", err);
        return;
}
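
/*
 * MDSMAP message framing handled above, for reference (decode order only;
 * the map body itself is parsed by ceph_mdsmap_decode()):
 *
 *   struct ceph_fsid fsid;    must match the cluster we mounted
 *   u32 epoch;                maps with epoch <= ours are ignored
 *   u32 maplen;
 *   <maplen bytes of encoded mdsmap>
 */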

static struct ceph_connection *con_get(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;

        if (get_session(s)) {
                dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
                return con;
        }
        dout("mdsc con_get %p FAIL\n", s);
        return NULL;
}

static void con_put(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;

        dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;

        pr_warn("mds%d closed our session\n", s->s_mds);
        send_mds_reconnect(mdsc, s);
}

static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        int type = le16_to_cpu(msg->hdr.type);

        mutex_lock(&mdsc->mutex);
        if (__verify_registered_session(mdsc, s) < 0) {
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
        mutex_unlock(&mdsc->mutex);

        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_mdsmap(mdsc, msg);
                break;
        case CEPH_MSG_FS_MAP_USER:
                ceph_mdsc_handle_fsmap(mdsc, msg);
                break;
        case CEPH_MSG_CLIENT_SESSION:
                handle_session(s, msg);
                break;
        case CEPH_MSG_CLIENT_REPLY:
                handle_reply(s, msg);
                break;
        case CEPH_MSG_CLIENT_REQUEST_FORWARD:
                handle_forward(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_CAPS:
                ceph_handle_caps(s, msg);
                break;
        case CEPH_MSG_CLIENT_SNAP:
                ceph_handle_snap(mdsc, s, msg);
                break;
        case CEPH_MSG_CLIENT_LEASE:
                handle_lease(mdsc, s, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }
out:
        ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
                                        int *proto, int force_new)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
        struct ceph_auth_handshake *auth = &s->s_auth;

        if (force_new && auth->authorizer) {
                ceph_auth_destroy_authorizer(auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        } else {
                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        }
        *proto = ac->protocol;

        return auth;
}


static int verify_authorizer_reply(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

        return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
        struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

        return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}

/*
 * Allocate the frame for an incoming message.  If the connection already
 * has a partially read message, keep using it; otherwise allocate one
 * sized for the advertised front section.
 */
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
                                struct ceph_msg_header *hdr, int *skip)
{
        struct ceph_msg *msg;
        int type = (int) le16_to_cpu(hdr->type);
        int front_len = (int) le32_to_cpu(hdr->front_len);

        if (con->in_msg)
                return con->in_msg;

        *skip = 0;
        msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
        if (!msg) {
                pr_err("unable to allocate msg type %d len %d\n",
                       type, front_len);
                return NULL;
        }

        return msg;
}

static int mds_sign_message(struct ceph_msg *msg)
{
        struct ceph_mds_session *s = msg->con->private;
        struct ceph_auth_handshake *auth = &s->s_auth;

        return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
        struct ceph_mds_session *s = msg->con->private;
        struct ceph_auth_handshake *auth = &s->s_auth;

        return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
        .get = con_get,
        .put = con_put,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .verify_authorizer_reply = verify_authorizer_reply,
        .invalidate_authorizer = invalidate_authorizer,
        .peer_reset = peer_reset,
        .alloc_msg = mds_alloc_msg,
        .sign_message = mds_sign_message,
        .check_message_signature = mds_check_message_signature,
};

/* eof */