// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	unsigned msg_version;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);
static void ceph_cap_release_work(struct work_struct *work);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	info->pool_ns_len = 0;
	info->pool_ns_data = NULL;
	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}
	}

	return 0;
bad:
	return err;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	{
		u16 flags = ceph_decode_16(p);
		info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
		info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
		info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
		info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
	}
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_entries);
	if ((unsigned long)(info->dir_entries + num) >
	    (unsigned long)info->dir_entries + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		rde->name_len = ceph_decode_32(p);
		ceph_decode_need(p, end, rde->name_len, bad);
		rde->name = *p;
		*p += rde->name_len;
		dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
		rde->lease = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &rde->inode, features);
		if (err < 0)
			goto out_bad;
		/* ceph_readdir_prepopulate() will update it */
		rde->offset = 0;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	u32 op = le32_to_cpu(info->head->op);

	if (op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_entries)
		return;
	free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	case CEPH_MDS_SESSION_REJECTED: return "rejected";
	default: return "???";
	}
}

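/*
 * Grab a session reference iff the session is still live (s_ref is
 * nonzero); returns NULL if the session is already being torn down.
 */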
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (refcount_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}

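/*
 * Drop a session reference; on the final put, destroy the authorizer
 * (if any) and free the session.
 */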
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
	if (refcount_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(s->s_auth.authorizer);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     refcount_read(&session->s_ref));
	get_session(session);
	return session;
}

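/*
 * True if we currently have a session registered for the given mds.
 * Called under mdsc->mutex.
 */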
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}

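/*
 * Verify that a session is still registered in the session array;
 * returns -ENOENT if it has been unregistered.  Called under
 * mdsc->mutex.
 */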
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	refcount_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);

	INIT_LIST_HEAD(&s->s_cap_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	s->s_state = 0;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

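/*
 * kref release callback: tear down a request once the last reference
 * is gone, dropping every cap pin and inode/dentry reference it holds.
 */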
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent)
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}

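/*
 * Generate lookup_request()/insert_request()/erase_request() helpers
 * for the request rbtree, keyed by r_tid.
 */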
DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list! */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
	struct inode *inode = NULL;

	while (dentry && !IS_ROOT(dentry)) {
		inode = d_inode_rcu(dentry);
		if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
			break;
		dentry = dentry->d_parent;
	}
	if (inode)
		inode = igrab(inode);
	return inode;
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
			inode = req->r_inode;
			ihold(inode);
		} else {
			/* req->r_dentry is non-null for LSSNAP request */
			rcu_read_lock();
			inode = get_nonsnap_parent(req->r_dentry);
			rcu_read_unlock();
			dout("__choose_mds using snapdir's parent %p\n", inode);
		}
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent;
		struct inode *dir;

		rcu_read_lock();
		parent = req->r_dentry->d_parent;
		dir = req->r_parent ? : d_inode_rcu(parent);

		if (!dir || dir->i_sb != mdsc->fsc->sb) {
			/* not this fs or parent went negative */
			inode = d_inode(req->r_dentry);
			if (inode)
				ihold(inode);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			inode = get_nonsnap_parent(parent);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = igrab(dir);
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			} else {
				ihold(inode);
			}
		}
		rcu_read_unlock();
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					goto out;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					goto out;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		iput(inode);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
out:
	iput(inode);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}

854 | ||
855 | /* | |
856 | * session messages | |
857 | */ | |
858 | static struct ceph_msg *create_session_msg(u32 op, u64 seq) | |
859 | { | |
860 | struct ceph_msg *msg; | |
861 | struct ceph_mds_session_head *h; | |
862 | ||
b61c2763 SW |
863 | msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, |
864 | false); | |
a79832f2 | 865 | if (!msg) { |
2f2dc053 | 866 | pr_err("create_session_msg ENOMEM creating msg\n"); |
a79832f2 | 867 | return NULL; |
2f2dc053 SW |
868 | } |
869 | h = msg->front.iov_base; | |
870 | h->op = cpu_to_le32(op); | |
871 | h->seq = cpu_to_le64(seq); | |
dbd0c8bf JS |
872 | |
873 | return msg; | |
874 | } | |
875 | ||
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", mdsc->nodename},
		{"kernel_version", init_utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0]; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0]; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

static void detach_cap_releases(struct ceph_mds_session *session,
				struct list_head *target)
{
	lockdep_assert_held(&session->s_cap_lock);

	list_splice_init(&session->s_cap_releases, target);
	session->s_num_cap_releases = 0;
	dout("dispose_cap_releases mds%d\n", session->s_mds);
}

static void dispose_cap_releases(struct ceph_mds_client *mdsc,
				 struct list_head *dispose)
{
	while (!list_empty(dispose)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(dispose, struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

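/*
 * Clean up requests on a session that is going away: unregister any
 * unsafe requests still queued on it, and zero r_attempts on the rest
 * so that kick_requests() will resend them.
 */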
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (!cap->ci) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release)
				__ceph_queue_cap_release(session, cap);
			else
				old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

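/*
 * iterate_session_caps() callback for remove_session_caps(): forcibly
 * drop a cap, discarding any dirty or flushing state that can no
 * longer be written back through this session.
 */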
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool drop = false;
	bool invalidate = false;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc = fsc->mdsc;

		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;

		if (ci->i_wrbuffer_ref > 0 &&
		    READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			invalidate = true;

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (atomic_read(&ci->i_filelock_ref) > 0) {
			/* make further file lock syscall return -EIO */
			ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
			pr_warn_ratelimited(" dropping file locks for %p %lld\n",
					    inode, ceph_ino(inode));
		}

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	if (drop)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	LIST_HEAD(dispose);

	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	detach_cap_releases(session, &dispose);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	spin_unlock(&session->s_cap_lock);
	dispose_cap_releases(session->s_mdsc, &dispose);
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

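/*
 * Acknowledge a CEPH_SESSION_FLUSHMSG message from the MDS.
 */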
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

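/*
 * Helper for trim_caps_cb(): if all children of a directory dentry are
 * negative, prune them so the dentry itself can be dropped.  Returns
 * true if there were no positive children (non-directories trivially
 * qualify).
 */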
static bool drop_negative_children(struct dentry *dentry)
{
	struct dentry *child;
	bool all_negative = true;

	if (!d_is_dir(dentry))
		goto out;

	spin_lock(&dentry->d_lock);
	list_for_each_entry(child, &dentry->d_subdirs, d_child) {
		if (d_really_is_positive(child)) {
			all_negative = false;
			break;
		}
	}
	spin_unlock(&dentry->d_lock);

	if (all_negative)
		shrink_dcache_parent(dentry);
out:
	return all_negative;
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
		/* Note: it's possible that i_filelock_ref becomes non-zero
		 * after dropping auth caps. It doesn't hurt because reply
		 * of lock mds request will re-add auth caps. */
		if (atomic_read(&ci->i_filelock_ref) > 0)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
		session->s_trim_caps--;
	} else {
		struct dentry *dentry;
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		dentry = d_find_any_alias(inode);
		if (dentry && drop_negative_children(dentry)) {
			int count;
			dput(dentry);
			d_prune_aliases(inode);
			count = atomic_read(&inode->i_count);
			if (count == 1)
				session->s_trim_caps--;
			dout("trim_caps_cb %p cap %p pruned, count now %d\n",
			     inode, cap, count);
		} else {
			dput(dentry);
		}
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

1546 | /* | |
1547 | * Trim session cap count down to some max number. | |
1548 | */ | |
1549 | static int trim_caps(struct ceph_mds_client *mdsc, | |
1550 | struct ceph_mds_session *session, | |
1551 | int max_caps) | |
1552 | { | |
1553 | int trim_caps = session->s_nr_caps - max_caps; | |
1554 | ||
1555 | dout("trim_caps mds%d start: %d / %d, trim %d\n", | |
1556 | session->s_mds, session->s_nr_caps, max_caps, trim_caps); | |
1557 | if (trim_caps > 0) { | |
1558 | session->s_trim_caps = trim_caps; | |
1559 | iterate_session_caps(session, trim_caps_cb, session); | |
1560 | dout("trim_caps mds%d done: %d / %d, trimmed %d\n", | |
1561 | session->s_mds, session->s_nr_caps, max_caps, | |
1562 | trim_caps - session->s_trim_caps); | |
5dacf091 | 1563 | session->s_trim_caps = 0; |
2f2dc053 | 1564 | } |
a56371d9 | 1565 | |
1de82206 | 1566 | ceph_flush_cap_releases(mdsc, session); |
2f2dc053 SW |
1567 | return 0; |
1568 | } | |
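/*
 * [Illustrative sketch, not part of this file] trim_caps() drives a
 * budget: overshoot = s_nr_caps - max_caps, and trim_caps_cb() spends
 * the budget and aborts the walk (returns -1) once it is exhausted.
 * The same idea in plain userspace C, with hypothetical types:
 */

struct entry { int in_use; };
struct cache { int nr; int trim_budget; };

static int trim_one(struct cache *c, struct entry *e)
{
	if (c->trim_budget <= 0)
		return -1;              /* budget spent: stop iterating */
	if (!e->in_use) {
		c->trim_budget--;       /* evict e here */
		c->nr--;
	}
	return 0;
}

static void trim_to_max(struct cache *c, struct entry *tbl, int n, int max)
{
	if (c->nr - max <= 0)
		return;
	c->trim_budget = c->nr - max;
	for (int i = 0; i < n; i++)
		if (trim_one(c, &tbl[i]) < 0)
			break;
	c->trim_budget = 0;
}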
1569 | ||
8310b089 YZ |
1570 | static int check_caps_flush(struct ceph_mds_client *mdsc, |
1571 | u64 want_flush_tid) | |
1572 | { | |
8310b089 YZ |
1573 | int ret = 1; |
1574 | ||
1575 | spin_lock(&mdsc->cap_dirty_lock); | |
e4500b5e YZ |
1576 | if (!list_empty(&mdsc->cap_flush_list)) { |
1577 | struct ceph_cap_flush *cf = | |
1578 | list_first_entry(&mdsc->cap_flush_list, | |
1579 | struct ceph_cap_flush, g_list); | |
1580 | if (cf->tid <= want_flush_tid) { | |
1581 | dout("check_caps_flush still flushing tid " | |
1582 | "%llu <= %llu\n", cf->tid, want_flush_tid); | |
1583 | ret = 0; | |
1584 | } | |
8310b089 YZ |
1585 | } |
1586 | spin_unlock(&mdsc->cap_dirty_lock); | |
1587 | return ret; | |
d3383a8e YZ |
1588 | } |
1589 | ||
2f2dc053 SW |
1590 | /* |
1591 | * flush all dirty inode data to disk. | |
1592 | * | |
8310b089 | 1593 | * returns once we've flushed through want_flush_tid
2f2dc053 | 1594 | */ |
affbc19a | 1595 | static void wait_caps_flush(struct ceph_mds_client *mdsc, |
0e294387 | 1596 | u64 want_flush_tid) |
2f2dc053 | 1597 | { |
0e294387 | 1598 | dout("check_caps_flush want %llu\n", want_flush_tid); |
8310b089 YZ |
1599 | |
1600 | wait_event(mdsc->cap_flushing_wq, | |
1601 | check_caps_flush(mdsc, want_flush_tid)); | |
1602 | ||
1603 | dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid); | |
2f2dc053 SW |
1604 | } |
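/*
 * [Illustrative sketch, not part of this file] wait_caps_flush() is a
 * classic wait_event() pattern: sleep until the oldest in-flight flush
 * tid has moved past the tid we care about. A userspace analogue with
 * a condition variable (tracker type is hypothetical):
 */

#include <pthread.h>
#include <stdint.h>
#include <stdbool.h>

struct flush_tracker {
	pthread_mutex_t lock;
	pthread_cond_t cond;          /* signalled when a flush completes */
	uint64_t oldest_pending_tid;  /* UINT64_MAX when nothing pending */
};

static bool flushed_thru(struct flush_tracker *t, uint64_t want)
{
	return t->oldest_pending_tid > want;
}

static void wait_flush(struct flush_tracker *t, uint64_t want)
{
	pthread_mutex_lock(&t->lock);
	while (!flushed_thru(t, want))
		pthread_cond_wait(&t->cond, &t->lock);
	pthread_mutex_unlock(&t->lock);
}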
1605 | ||
1606 | /* | |
1607 | * called under s_mutex | |
1608 | */ | |
1de82206 YZ |
1609 | static void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1610 | struct ceph_mds_session *session) | |
2f2dc053 | 1611 | { |
745a8e3b YZ |
1612 | struct ceph_msg *msg = NULL; |
1613 | struct ceph_mds_cap_release *head; | |
1614 | struct ceph_mds_cap_item *item; | |
92475f05 | 1615 | struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; |
745a8e3b YZ |
1616 | struct ceph_cap *cap; |
1617 | LIST_HEAD(tmp_list); | |
1618 | int num_cap_releases; | |
92475f05 JL |
1619 | __le32 barrier, *cap_barrier; |
1620 | ||
1621 | down_read(&osdc->lock); | |
1622 | barrier = cpu_to_le32(osdc->epoch_barrier); | |
1623 | up_read(&osdc->lock); | |
2f2dc053 | 1624 | |
0f8605f2 | 1625 | spin_lock(&session->s_cap_lock); |
745a8e3b YZ |
1626 | again: |
1627 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1628 | num_cap_releases = session->s_num_cap_releases; | |
1629 | session->s_num_cap_releases = 0; | |
2f2dc053 | 1630 | spin_unlock(&session->s_cap_lock); |
e01a5946 | 1631 | |
745a8e3b YZ |
1632 | while (!list_empty(&tmp_list)) { |
1633 | if (!msg) { | |
1634 | msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, | |
09cbfeaf | 1635 | PAGE_SIZE, GFP_NOFS, false); |
745a8e3b YZ |
1636 | if (!msg) |
1637 | goto out_err; | |
1638 | head = msg->front.iov_base; | |
1639 | head->num = cpu_to_le32(0); | |
1640 | msg->front.iov_len = sizeof(*head); | |
92475f05 JL |
1641 | |
1642 | msg->hdr.version = cpu_to_le16(2); | |
1643 | msg->hdr.compat_version = cpu_to_le16(1); | |
745a8e3b | 1644 | } |
92475f05 | 1645 | |
745a8e3b YZ |
1646 | cap = list_first_entry(&tmp_list, struct ceph_cap, |
1647 | session_caps); | |
1648 | list_del(&cap->session_caps); | |
1649 | num_cap_releases--; | |
e01a5946 | 1650 | |
00bd8edb | 1651 | head = msg->front.iov_base; |
745a8e3b YZ |
1652 | le32_add_cpu(&head->num, 1); |
1653 | item = msg->front.iov_base + msg->front.iov_len; | |
1654 | item->ino = cpu_to_le64(cap->cap_ino); | |
1655 | item->cap_id = cpu_to_le64(cap->cap_id); | |
1656 | item->migrate_seq = cpu_to_le32(cap->mseq); | |
1657 | item->seq = cpu_to_le32(cap->issue_seq); | |
1658 | msg->front.iov_len += sizeof(*item); | |
1659 | ||
1660 | ceph_put_cap(mdsc, cap); | |
1661 | ||
1662 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | |
92475f05 JL |
1663 | // Append cap_barrier field |
1664 | cap_barrier = msg->front.iov_base + msg->front.iov_len; | |
1665 | *cap_barrier = barrier; | |
1666 | msg->front.iov_len += sizeof(*cap_barrier); | |
1667 | ||
745a8e3b YZ |
1668 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
1669 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1670 | ceph_con_send(&session->s_con, msg); | |
1671 | msg = NULL; | |
1672 | } | |
00bd8edb | 1673 | } |
e01a5946 | 1674 | |
745a8e3b | 1675 | BUG_ON(num_cap_releases != 0); |
e01a5946 | 1676 | |
745a8e3b YZ |
1677 | spin_lock(&session->s_cap_lock); |
1678 | if (!list_empty(&session->s_cap_releases)) | |
1679 | goto again; | |
1680 | spin_unlock(&session->s_cap_lock); | |
1681 | ||
1682 | if (msg) { | |
92475f05 JL |
1683 | // Append cap_barrier field |
1684 | cap_barrier = msg->front.iov_base + msg->front.iov_len; | |
1685 | *cap_barrier = barrier; | |
1686 | msg->front.iov_len += sizeof(*cap_barrier); | |
1687 | ||
745a8e3b YZ |
1688 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); |
1689 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1690 | ceph_con_send(&session->s_con, msg); | |
e01a5946 | 1691 | } |
745a8e3b YZ |
1692 | return; |
1693 | out_err: | |
1694 | pr_err("send_cap_releases mds%d, failed to allocate message\n", | |
1695 | session->s_mds); | |
1696 | spin_lock(&session->s_cap_lock); | |
1697 | list_splice(&tmp_list, &session->s_cap_releases); | |
1698 | session->s_num_cap_releases += num_cap_releases; | |
1699 | spin_unlock(&session->s_cap_lock); | |
e01a5946 SW |
1700 | } |
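/*
 * [Illustrative sketch, not part of this file] ceph_send_cap_releases()
 * batches queued releases into messages, sending one as soon as it
 * holds CEPH_CAPS_PER_RELEASE items and flushing the final partial
 * message at the end. The batching skeleton, reduced to userspace C
 * with hypothetical types:
 */

#include <stdint.h>
#include <stdio.h>

#define BATCH 16                     /* CEPH_CAPS_PER_RELEASE analogue */

struct release { uint64_t ino, cap_id; };
struct batcher { struct release buf[BATCH]; int n; };

static void send_batch(struct batcher *b)
{
	if (b->n) {
		printf("send %d releases\n", b->n);  /* ceph_con_send() */
		b->n = 0;
	}
}

static void queue_release(struct batcher *b, uint64_t ino, uint64_t cap_id)
{
	b->buf[b->n].ino = ino;
	b->buf[b->n].cap_id = cap_id;
	if (++b->n == BATCH)
		send_batch(b);           /* message full: flush it */
}
/* ...the caller ends with send_batch(b) for the leftover partial batch. */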
1701 | ||
1de82206 YZ |
1702 | static void ceph_cap_release_work(struct work_struct *work) |
1703 | { | |
1704 | struct ceph_mds_session *session = | |
1705 | container_of(work, struct ceph_mds_session, s_cap_release_work); | |
1706 | ||
1707 | mutex_lock(&session->s_mutex); | |
1708 | if (session->s_state == CEPH_MDS_SESSION_OPEN || | |
1709 | session->s_state == CEPH_MDS_SESSION_HUNG) | |
1710 | ceph_send_cap_releases(session->s_mdsc, session); | |
1711 | mutex_unlock(&session->s_mutex); | |
1712 | ceph_put_mds_session(session); | |
1713 | } | |
1714 | ||
1715 | void ceph_flush_cap_releases(struct ceph_mds_client *mdsc, | |
1716 | struct ceph_mds_session *session) | |
1717 | { | |
1718 | if (mdsc->stopping) | |
1719 | return; | |
1720 | ||
1721 | get_session(session); | |
1722 | if (queue_work(mdsc->fsc->cap_wq, | |
1723 | &session->s_cap_release_work)) { | |
1724 | dout("cap release work queued\n"); | |
1725 | } else { | |
1726 | ceph_put_mds_session(session); | |
1727 | dout("failed to queue cap release work\n"); | |
1728 | } | |
1729 | } | |
1730 | ||
1731 | /* | |
1732 | * caller holds session->s_cap_lock | |
1733 | */ | |
1734 | void __ceph_queue_cap_release(struct ceph_mds_session *session, | |
1735 | struct ceph_cap *cap) | |
1736 | { | |
1737 | list_add_tail(&cap->session_caps, &session->s_cap_releases); | |
1738 | session->s_num_cap_releases++; | |
1739 | ||
1740 | if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE)) | |
1741 | ceph_flush_cap_releases(session->s_mdsc, session); | |
1742 | } | |
1743 | ||
2f2dc053 SW |
1744 | /* |
1745 | * requests | |
1746 | */ | |
1747 | ||
54008399 YZ |
1748 | int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, |
1749 | struct inode *dir) | |
1750 | { | |
1751 | struct ceph_inode_info *ci = ceph_inode(dir); | |
1752 | struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; | |
1753 | struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; | |
2a5beea3 | 1754 | size_t size = sizeof(struct ceph_mds_reply_dir_entry); |
54008399 YZ |
1755 | int order, num_entries; |
1756 | ||
1757 | spin_lock(&ci->i_ceph_lock); | |
1758 | num_entries = ci->i_files + ci->i_subdirs; | |
1759 | spin_unlock(&ci->i_ceph_lock); | |
1760 | num_entries = max(num_entries, 1); | |
1761 | num_entries = min(num_entries, opt->max_readdir); | |
1762 | ||
1763 | order = get_order(size * num_entries); | |
1764 | while (order >= 0) { | |
2a5beea3 YZ |
1765 | rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL | |
1766 | __GFP_NOWARN, | |
1767 | order); | |
1768 | if (rinfo->dir_entries) | |
54008399 YZ |
1769 | break; |
1770 | order--; | |
1771 | } | |
2a5beea3 | 1772 | if (!rinfo->dir_entries) |
54008399 YZ |
1773 | return -ENOMEM; |
1774 | ||
1775 | num_entries = (PAGE_SIZE << order) / size; | |
1776 | num_entries = min(num_entries, opt->max_readdir); | |
1777 | ||
1778 | rinfo->dir_buf_size = PAGE_SIZE << order; | |
1779 | req->r_num_caps = num_entries + 1; | |
1780 | req->r_args.readdir.max_entries = cpu_to_le32(num_entries); | |
1781 | req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); | |
1782 | return 0; | |
1783 | } | |
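/*
 * [Illustrative sketch, not part of this file] The allocation loop in
 * ceph_alloc_readdir_reply_buffer() sizes a power-of-two buffer for the
 * estimated entry count, then falls back to smaller orders when memory
 * is tight and recomputes how many entries fit. A userspace analogue
 * (names hypothetical, page size assumed 4096):
 */

#include <stdlib.h>

#define PAGE_SZ 4096

static void *alloc_dir_buf(size_t entry_size, int want,
			   size_t *buf_size, int *fit)
{
	int order = 0;

	while (((size_t)PAGE_SZ << order) < entry_size * (size_t)want)
		order++;                            /* get_order() analogue */
	for (; order >= 0; order--) {
		void *p = malloc((size_t)PAGE_SZ << order);
		if (p) {
			*buf_size = (size_t)PAGE_SZ << order;
			*fit = (int)(*buf_size / entry_size);
			return p;                   /* may fit fewer entries */
		}
	}
	return NULL;
}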
1784 | ||
2f2dc053 SW |
1785 | /* |
1786 | * Create an mds request. | |
1787 | */ | |
1788 | struct ceph_mds_request * | |
1789 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |
1790 | { | |
1791 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | |
1792 | ||
1793 | if (!req) | |
1794 | return ERR_PTR(-ENOMEM); | |
1795 | ||
b4556396 | 1796 | mutex_init(&req->r_fill_mutex); |
37151668 | 1797 | req->r_mdsc = mdsc; |
2f2dc053 SW |
1798 | req->r_started = jiffies; |
1799 | req->r_resend_mds = -1; | |
1800 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | |
68cd5b4b | 1801 | INIT_LIST_HEAD(&req->r_unsafe_target_item); |
2f2dc053 | 1802 | req->r_fmode = -1; |
153c8e6b | 1803 | kref_init(&req->r_kref); |
fcd00b68 | 1804 | RB_CLEAR_NODE(&req->r_node); |
2f2dc053 SW |
1805 | INIT_LIST_HEAD(&req->r_wait); |
1806 | init_completion(&req->r_completion); | |
1807 | init_completion(&req->r_safe_completion); | |
1808 | INIT_LIST_HEAD(&req->r_unsafe_item); | |
1809 | ||
56199016 | 1810 | req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); |
b8e69066 | 1811 | |
2f2dc053 SW |
1812 | req->r_op = op; |
1813 | req->r_direct_mode = mode; | |
1814 | return req; | |
1815 | } | |
1816 | ||
1817 | /* | |
44ca18f2 | 1818 | * return the oldest (lowest tid) request in the request tree (NULL if none), or its tid (0 if none).
2f2dc053 SW |
1819 | * |
1820 | * called under mdsc->mutex. | |
1821 | */ | |
44ca18f2 SW |
1822 | static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) |
1823 | { | |
1824 | if (RB_EMPTY_ROOT(&mdsc->request_tree)) | |
1825 | return NULL; | |
1826 | return rb_entry(rb_first(&mdsc->request_tree), | |
1827 | struct ceph_mds_request, r_node); | |
1828 | } | |
1829 | ||
e8a7b8b1 | 1830 | static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) |
2f2dc053 | 1831 | { |
e8a7b8b1 | 1832 | return mdsc->oldest_tid; |
2f2dc053 SW |
1833 | } |
1834 | ||
1835 | /* | |
1836 | * Build a dentry's path. Allocate on heap; caller must kfree. Based | |
1837 | * on build_path_from_dentry in fs/cifs/dir.c. | |
1838 | * | |
1839 | * If @stop_on_nosnap, generate path relative to the first non-snapped | |
1840 | * inode. | |
1841 | * | |
1842 | * Encode hidden .snap dirs as a double /, i.e. | |
1843 | * foo/.snap/bar -> foo//bar | |
1844 | */ | |
1845 | char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | |
1846 | int stop_on_nosnap) | |
1847 | { | |
1848 | struct dentry *temp; | |
1849 | char *path; | |
1850 | int len, pos; | |
1b71fe2e | 1851 | unsigned seq; |
2f2dc053 | 1852 | |
d37b1d99 | 1853 | if (!dentry) |
2f2dc053 SW |
1854 | return ERR_PTR(-EINVAL); |
1855 | ||
1856 | retry: | |
1857 | len = 0; | |
1b71fe2e AV |
1858 | seq = read_seqbegin(&rename_lock); |
1859 | rcu_read_lock(); | |
2f2dc053 | 1860 | for (temp = dentry; !IS_ROOT(temp);) { |
2b0143b5 | 1861 | struct inode *inode = d_inode(temp); |
2f2dc053 SW |
1862 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) |
1863 | len++; /* slash only */ | |
1864 | else if (stop_on_nosnap && inode && | |
1865 | ceph_snap(inode) == CEPH_NOSNAP) | |
1866 | break; | |
1867 | else | |
1868 | len += 1 + temp->d_name.len; | |
1869 | temp = temp->d_parent; | |
2f2dc053 | 1870 | } |
1b71fe2e | 1871 | rcu_read_unlock(); |
2f2dc053 SW |
1872 | if (len) |
1873 | len--; /* no leading '/' */ | |
1874 | ||
1875 | path = kmalloc(len+1, GFP_NOFS); | |
d37b1d99 | 1876 | if (!path) |
2f2dc053 SW |
1877 | return ERR_PTR(-ENOMEM); |
1878 | pos = len; | |
1879 | path[pos] = 0; /* trailing null */ | |
1b71fe2e | 1880 | rcu_read_lock(); |
2f2dc053 | 1881 | for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { |
1b71fe2e | 1882 | struct inode *inode; |
2f2dc053 | 1883 | |
1b71fe2e | 1884 | spin_lock(&temp->d_lock); |
2b0143b5 | 1885 | inode = d_inode(temp); |
2f2dc053 | 1886 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { |
104648ad | 1887 | dout("build_path path+%d: %p SNAPDIR\n", |
2f2dc053 SW |
1888 | pos, temp); |
1889 | } else if (stop_on_nosnap && inode && | |
1890 | ceph_snap(inode) == CEPH_NOSNAP) { | |
9d5a09e6 | 1891 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1892 | break; |
1893 | } else { | |
1894 | pos -= temp->d_name.len; | |
1b71fe2e AV |
1895 | if (pos < 0) { |
1896 | spin_unlock(&temp->d_lock); | |
2f2dc053 | 1897 | break; |
1b71fe2e | 1898 | } |
2f2dc053 SW |
1899 | strncpy(path + pos, temp->d_name.name, |
1900 | temp->d_name.len); | |
2f2dc053 | 1901 | } |
1b71fe2e | 1902 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1903 | if (pos) |
1904 | path[--pos] = '/'; | |
1905 | temp = temp->d_parent; | |
2f2dc053 | 1906 | } |
1b71fe2e AV |
1907 | rcu_read_unlock(); |
1908 | if (pos != 0 || read_seqretry(&rename_lock, seq)) { | |
104648ad | 1909 | pr_err("build_path did not end path lookup where " |
2f2dc053 SW |
1910 | "expected, namelen is %d, pos is %d\n", len, pos); |
1911 | /* presumably this is only possible if racing with a | |
1912 | rename of one of the parent directories (we cannot
1913 | lock the dentries above us to prevent this, but | |
1914 | retrying should be harmless) */ | |
1915 | kfree(path); | |
1916 | goto retry; | |
1917 | } | |
1918 | ||
2b0143b5 | 1919 | *base = ceph_ino(d_inode(temp)); |
2f2dc053 | 1920 | *plen = len; |
104648ad | 1921 | dout("build_path on %p %d built %llx '%.*s'\n", |
84d08fa8 | 1922 | dentry, d_count(dentry), *base, len, path); |
2f2dc053 SW |
1923 | return path; |
1924 | } | |
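/*
 * [Illustrative sketch, not part of this file] ceph_mdsc_build_path()
 * is a two-pass reverse walk: measure the path length parent-by-parent,
 * then fill the buffer from the end. (The rename_lock seqlock retry is
 * elided here; a single-threaded model doesn't need it.) The node type
 * below is hypothetical:
 */

#include <stdlib.h>
#include <string.h>

struct pnode { const char *name; struct pnode *parent; };

static char *build_path(const struct pnode *n)
{
	size_t len = 0, pos;
	char *path;

	for (const struct pnode *t = n; t->parent; t = t->parent)
		len += 1 + strlen(t->name);         /* '/' + component */
	if (len)
		len--;                              /* no leading '/' */

	path = malloc(len + 1);
	if (!path)
		return NULL;
	pos = len;
	path[pos] = '\0';
	for (const struct pnode *t = n; t->parent; t = t->parent) {
		size_t l = strlen(t->name);
		pos -= l;
		memcpy(path + pos, t->name, l);
		if (pos)
			path[--pos] = '/';
	}
	return path;                                /* e.g. "a/b/c" */
}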
1925 | ||
fd36a717 | 1926 | static int build_dentry_path(struct dentry *dentry, struct inode *dir, |
2f2dc053 SW |
1927 | const char **ppath, int *ppathlen, u64 *pino, |
1928 | int *pfreepath) | |
1929 | { | |
1930 | char *path; | |
1931 | ||
c6b0b656 | 1932 | rcu_read_lock(); |
fd36a717 JL |
1933 | if (!dir) |
1934 | dir = d_inode_rcu(dentry->d_parent); | |
c6b0b656 JL |
1935 | if (dir && ceph_snap(dir) == CEPH_NOSNAP) { |
1936 | *pino = ceph_ino(dir); | |
1937 | rcu_read_unlock(); | |
2f2dc053 SW |
1938 | *ppath = dentry->d_name.name; |
1939 | *ppathlen = dentry->d_name.len; | |
1940 | return 0; | |
1941 | } | |
c6b0b656 | 1942 | rcu_read_unlock(); |
2f2dc053 SW |
1943 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); |
1944 | if (IS_ERR(path)) | |
1945 | return PTR_ERR(path); | |
1946 | *ppath = path; | |
1947 | *pfreepath = 1; | |
1948 | return 0; | |
1949 | } | |
1950 | ||
1951 | static int build_inode_path(struct inode *inode, | |
1952 | const char **ppath, int *ppathlen, u64 *pino, | |
1953 | int *pfreepath) | |
1954 | { | |
1955 | struct dentry *dentry; | |
1956 | char *path; | |
1957 | ||
1958 | if (ceph_snap(inode) == CEPH_NOSNAP) { | |
1959 | *pino = ceph_ino(inode); | |
1960 | *ppathlen = 0; | |
1961 | return 0; | |
1962 | } | |
1963 | dentry = d_find_alias(inode); | |
1964 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1965 | dput(dentry); | |
1966 | if (IS_ERR(path)) | |
1967 | return PTR_ERR(path); | |
1968 | *ppath = path; | |
1969 | *pfreepath = 1; | |
1970 | return 0; | |
1971 | } | |
1972 | ||
1973 | /* | |
1974 | * request arguments may be specified via an inode *, a dentry *, or | |
1975 | * an explicit ino+path. | |
1976 | */ | |
1977 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |
fd36a717 JL |
1978 | struct inode *rdiri, const char *rpath, |
1979 | u64 rino, const char **ppath, int *pathlen, | |
2f2dc053 SW |
1980 | u64 *ino, int *freepath) |
1981 | { | |
1982 | int r = 0; | |
1983 | ||
1984 | if (rinode) { | |
1985 | r = build_inode_path(rinode, ppath, pathlen, ino, freepath); | |
1986 | dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), | |
1987 | ceph_snap(rinode)); | |
1988 | } else if (rdentry) { | |
fd36a717 JL |
1989 | r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, |
1990 | freepath); | |
2f2dc053 SW |
1991 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, |
1992 | *ppath); | |
795858db | 1993 | } else if (rpath || rino) { |
2f2dc053 SW |
1994 | *ino = rino; |
1995 | *ppath = rpath; | |
b000056a | 1996 | *pathlen = rpath ? strlen(rpath) : 0; |
2f2dc053 SW |
1997 | dout(" path %.*s\n", *pathlen, rpath); |
1998 | } | |
1999 | ||
2000 | return r; | |
2001 | } | |
2002 | ||
2003 | /* | |
2004 | * called under mdsc->mutex | |
2005 | */ | |
2006 | static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |
2007 | struct ceph_mds_request *req, | |
6e6f0923 | 2008 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2009 | { |
2010 | struct ceph_msg *msg; | |
2011 | struct ceph_mds_request_head *head; | |
2012 | const char *path1 = NULL; | |
2013 | const char *path2 = NULL; | |
2014 | u64 ino1 = 0, ino2 = 0; | |
2015 | int pathlen1 = 0, pathlen2 = 0; | |
2016 | int freepath1 = 0, freepath2 = 0; | |
2017 | int len; | |
2018 | u16 releases; | |
2019 | void *p, *end; | |
2020 | int ret; | |
2021 | ||
2022 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | |
3dd69aab | 2023 | req->r_parent, req->r_path1, req->r_ino1.ino, |
2f2dc053 SW |
2024 | &path1, &pathlen1, &ino1, &freepath1); |
2025 | if (ret < 0) { | |
2026 | msg = ERR_PTR(ret); | |
2027 | goto out; | |
2028 | } | |
2029 | ||
2030 | ret = set_request_path_attr(NULL, req->r_old_dentry, | |
fd36a717 | 2031 | req->r_old_dentry_dir, |
2f2dc053 SW |
2032 | req->r_path2, req->r_ino2.ino, |
2033 | &path2, &pathlen2, &ino2, &freepath2); | |
2034 | if (ret < 0) { | |
2035 | msg = ERR_PTR(ret); | |
2036 | goto out_free1; | |
2037 | } | |
2038 | ||
2039 | len = sizeof(*head) + | |
b8e69066 | 2040 | pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + |
777d738a | 2041 | sizeof(struct ceph_timespec); |
2f2dc053 SW |
2042 | |
2043 | /* calculate (max) length for cap releases */ | |
2044 | len += sizeof(struct ceph_mds_request_release) * | |
2045 | (!!req->r_inode_drop + !!req->r_dentry_drop + | |
2046 | !!req->r_old_inode_drop + !!req->r_old_dentry_drop); | |
2047 | if (req->r_dentry_drop) | |
2048 | len += req->r_dentry->d_name.len; | |
2049 | if (req->r_old_dentry_drop) | |
2050 | len += req->r_old_dentry->d_name.len; | |
2051 | ||
b61c2763 | 2052 | msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f2 SW |
2053 | if (!msg) { |
2054 | msg = ERR_PTR(-ENOMEM); | |
2f2dc053 | 2055 | goto out_free2; |
a79832f2 | 2056 | } |
2f2dc053 | 2057 | |
7cfa0313 | 2058 | msg->hdr.version = cpu_to_le16(2); |
6df058c0 SW |
2059 | msg->hdr.tid = cpu_to_le64(req->r_tid); |
2060 | ||
2f2dc053 SW |
2061 | head = msg->front.iov_base; |
2062 | p = msg->front.iov_base + sizeof(*head); | |
2063 | end = msg->front.iov_base + msg->front.iov_len; | |
2064 | ||
2065 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | |
2066 | head->op = cpu_to_le32(req->r_op); | |
ff3d0046 EB |
2067 | head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); |
2068 | head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); | |
2f2dc053 SW |
2069 | head->args = req->r_args; |
2070 | ||
2071 | ceph_encode_filepath(&p, end, ino1, path1); | |
2072 | ceph_encode_filepath(&p, end, ino2, path2); | |
2073 | ||
e979cf50 SW |
2074 | /* make note of release offset, in case we need to replay */ |
2075 | req->r_request_release_offset = p - msg->front.iov_base; | |
2076 | ||
2f2dc053 SW |
2077 | /* cap releases */ |
2078 | releases = 0; | |
2079 | if (req->r_inode_drop) | |
2080 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 2081 | req->r_inode ? req->r_inode : d_inode(req->r_dentry), |
2f2dc053 SW |
2082 | mds, req->r_inode_drop, req->r_inode_unless, 0); |
2083 | if (req->r_dentry_drop) | |
2084 | releases += ceph_encode_dentry_release(&p, req->r_dentry, | |
3dd69aab | 2085 | req->r_parent, mds, req->r_dentry_drop, |
ca6c8ae0 | 2086 | req->r_dentry_unless); |
2f2dc053 SW |
2087 | if (req->r_old_dentry_drop) |
2088 | releases += ceph_encode_dentry_release(&p, req->r_old_dentry, | |
ca6c8ae0 JL |
2089 | req->r_old_dentry_dir, mds, |
2090 | req->r_old_dentry_drop, | |
2091 | req->r_old_dentry_unless); | |
2f2dc053 SW |
2092 | if (req->r_old_inode_drop) |
2093 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 2094 | d_inode(req->r_old_dentry), |
2f2dc053 | 2095 | mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); |
6e6f0923 YZ |
2096 | |
2097 | if (drop_cap_releases) { | |
2098 | releases = 0; | |
2099 | p = msg->front.iov_base + req->r_request_release_offset; | |
2100 | } | |
2101 | ||
2f2dc053 SW |
2102 | head->num_releases = cpu_to_le16(releases); |
2103 | ||
b8e69066 | 2104 | /* time stamp */ |
1f041a89 YZ |
2105 | { |
2106 | struct ceph_timespec ts; | |
2107 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2108 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2109 | } | |
b8e69066 | 2110 | |
2f2dc053 SW |
2111 | BUG_ON(p > end); |
2112 | msg->front.iov_len = p - msg->front.iov_base; | |
2113 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
2114 | ||
25e6bae3 YZ |
2115 | if (req->r_pagelist) { |
2116 | struct ceph_pagelist *pagelist = req->r_pagelist; | |
0e1a5ee6 | 2117 | refcount_inc(&pagelist->refcnt); |
25e6bae3 YZ |
2118 | ceph_msg_data_add_pagelist(msg, pagelist); |
2119 | msg->hdr.data_len = cpu_to_le32(pagelist->length); | |
2120 | } else { | |
2121 | msg->hdr.data_len = 0; | |
ebf18f47 | 2122 | } |
02afca6c | 2123 | |
2f2dc053 SW |
2124 | msg->hdr.data_off = cpu_to_le16(0); |
2125 | ||
2126 | out_free2: | |
2127 | if (freepath2) | |
2128 | kfree((char *)path2); | |
2129 | out_free1: | |
2130 | if (freepath1) | |
2131 | kfree((char *)path1); | |
2132 | out: | |
2133 | return msg; | |
2134 | } | |
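/*
 * [Illustrative sketch, not part of this file] create_request_message()
 * records r_request_release_offset so a later replay can seek back into
 * the encoded message and rewrite what follows (releases, timestamp).
 * The underlying technique is an append-only encode buffer with a saved
 * mark; a minimal userspace version:
 */

#include <stddef.h>
#include <string.h>

struct ebuf { char *base; size_t len, cap, mark; };

static int encode(struct ebuf *b, const void *src, size_t n)
{
	if (b->len + n > b->cap)
		return -1;                  /* would overflow the buffer */
	memcpy(b->base + b->len, src, n);
	b->len += n;
	return 0;
}

static void set_mark(struct ebuf *b)      { b->mark = b->len; }
static void seek_to_mark(struct ebuf *b)  { b->len = b->mark; }
/* replay path: seek_to_mark(), then re-encode from the mark onward. */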
2135 | ||
2136 | /* | |
2137 | * called under mdsc->mutex if error, under no mutex if | |
2138 | * success. | |
2139 | */ | |
2140 | static void complete_request(struct ceph_mds_client *mdsc, | |
2141 | struct ceph_mds_request *req) | |
2142 | { | |
2143 | if (req->r_callback) | |
2144 | req->r_callback(mdsc, req); | |
2145 | else | |
03066f23 | 2146 | complete_all(&req->r_completion); |
2f2dc053 SW |
2147 | } |
2148 | ||
2149 | /* | |
2150 | * called under mdsc->mutex | |
2151 | */ | |
2152 | static int __prepare_send_request(struct ceph_mds_client *mdsc, | |
2153 | struct ceph_mds_request *req, | |
6e6f0923 | 2154 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2155 | { |
2156 | struct ceph_mds_request_head *rhead; | |
2157 | struct ceph_msg *msg; | |
2158 | int flags = 0; | |
2159 | ||
2f2dc053 | 2160 | req->r_attempts++; |
e55b71f8 GF |
2161 | if (req->r_inode) { |
2162 | struct ceph_cap *cap = | |
2163 | ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); | |
2164 | ||
2165 | if (cap) | |
2166 | req->r_sent_on_mseq = cap->mseq; | |
2167 | else | |
2168 | req->r_sent_on_mseq = -1; | |
2169 | } | |
2f2dc053 SW |
2170 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
2171 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | |
2172 | ||
bc2de10d | 2173 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
c5c9a0bf | 2174 | void *p; |
01a92f17 SW |
2175 | /* |
2176 | * Replay. Do not regenerate message (and rebuild | |
2177 | * paths, etc.); just use the original message. | |
2178 | * Rebuilding paths will break for renames because | |
2179 | * d_move mangles the src name. | |
2180 | */ | |
2181 | msg = req->r_request; | |
2182 | rhead = msg->front.iov_base; | |
2183 | ||
2184 | flags = le32_to_cpu(rhead->flags); | |
2185 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2186 | rhead->flags = cpu_to_le32(flags); | |
2187 | ||
2188 | if (req->r_target_inode) | |
2189 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | |
2190 | ||
2191 | rhead->num_retry = req->r_attempts - 1; | |
e979cf50 SW |
2192 | |
2193 | /* remove cap/dentry releases from message */ | |
2194 | rhead->num_releases = 0; | |
c5c9a0bf YZ |
2195 | |
2196 | /* time stamp */ | |
2197 | p = msg->front.iov_base + req->r_request_release_offset; | |
1f041a89 YZ |
2198 | { |
2199 | struct ceph_timespec ts; | |
2200 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2201 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2202 | } | |
c5c9a0bf YZ |
2203 | |
2204 | msg->front.iov_len = p - msg->front.iov_base; | |
2205 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
01a92f17 SW |
2206 | return 0; |
2207 | } | |
2208 | ||
2f2dc053 SW |
2209 | if (req->r_request) { |
2210 | ceph_msg_put(req->r_request); | |
2211 | req->r_request = NULL; | |
2212 | } | |
6e6f0923 | 2213 | msg = create_request_message(mdsc, req, mds, drop_cap_releases); |
2f2dc053 | 2214 | if (IS_ERR(msg)) { |
e1518c7c | 2215 | req->r_err = PTR_ERR(msg); |
a79832f2 | 2216 | return PTR_ERR(msg); |
2f2dc053 SW |
2217 | } |
2218 | req->r_request = msg; | |
2219 | ||
2220 | rhead = msg->front.iov_base; | |
2f2dc053 | 2221 | rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
bc2de10d | 2222 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
2f2dc053 | 2223 | flags |= CEPH_MDS_FLAG_REPLAY; |
3dd69aab | 2224 | if (req->r_parent) |
2f2dc053 SW |
2225 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; |
2226 | rhead->flags = cpu_to_le32(flags); | |
2227 | rhead->num_fwd = req->r_num_fwd; | |
2228 | rhead->num_retry = req->r_attempts - 1; | |
01a92f17 | 2229 | rhead->ino = 0; |
2f2dc053 | 2230 | |
3dd69aab | 2231 | dout(" r_parent = %p\n", req->r_parent); |
2f2dc053 SW |
2232 | return 0; |
2233 | } | |
2234 | ||
2235 | /* | |
2236 | * send request, or put it on the appropriate wait list. | |
2237 | */ | |
2238 | static int __do_request(struct ceph_mds_client *mdsc, | |
2239 | struct ceph_mds_request *req) | |
2240 | { | |
2241 | struct ceph_mds_session *session = NULL; | |
2242 | int mds = -1; | |
48fec5d0 | 2243 | int err = 0; |
2f2dc053 | 2244 | |
bc2de10d JL |
2245 | if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
2246 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) | |
eb1b8af3 | 2247 | __unregister_request(mdsc, req); |
2f2dc053 | 2248 | goto out; |
eb1b8af3 | 2249 | } |
2f2dc053 SW |
2250 | |
2251 | if (req->r_timeout && | |
2252 | time_after_eq(jiffies, req->r_started + req->r_timeout)) { | |
2253 | dout("do_request timed out\n"); | |
2254 | err = -EIO; | |
2255 | goto finish; | |
2256 | } | |
52953d55 | 2257 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { |
48fec5d0 YZ |
2258 | dout("do_request forced umount\n"); |
2259 | err = -EIO; | |
2260 | goto finish; | |
2261 | } | |
52953d55 | 2262 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { |
e9e427f0 YZ |
2263 | if (mdsc->mdsmap_err) { |
2264 | err = mdsc->mdsmap_err; | |
2265 | dout("do_request mdsmap err %d\n", err); | |
2266 | goto finish; | |
2267 | } | |
cc8e8342 YZ |
2268 | if (mdsc->mdsmap->m_epoch == 0) { |
2269 | dout("do_request no mdsmap, waiting for map\n"); | |
2270 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2271 | goto finish; | |
2272 | } | |
e9e427f0 YZ |
2273 | if (!(mdsc->fsc->mount_options->flags & |
2274 | CEPH_MOUNT_OPT_MOUNTWAIT) && | |
2275 | !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { | |
2276 | err = -ENOENT; | |
2277 | pr_info("probably no mds server is up\n"); | |
2278 | goto finish; | |
2279 | } | |
2280 | } | |
2f2dc053 | 2281 | |
dc69e2e9 SW |
2282 | put_request_session(req); |
2283 | ||
2f2dc053 SW |
2284 | mds = __choose_mds(mdsc, req); |
2285 | if (mds < 0 || | |
2286 | ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { | |
2287 | dout("do_request no mds or not active, waiting for map\n"); | |
2288 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2289 | goto out; | |
2290 | } | |
2291 | ||
2292 | /* get, open session */ | |
2293 | session = __ceph_lookup_mds_session(mdsc, mds); | |
9c423956 | 2294 | if (!session) { |
2f2dc053 | 2295 | session = register_session(mdsc, mds); |
9c423956 SW |
2296 | if (IS_ERR(session)) { |
2297 | err = PTR_ERR(session); | |
2298 | goto finish; | |
2299 | } | |
2300 | } | |
dc69e2e9 SW |
2301 | req->r_session = get_session(session); |
2302 | ||
2f2dc053 | 2303 | dout("do_request mds%d session %p state %s\n", mds, session, |
a687ecaf | 2304 | ceph_session_state_name(session->s_state)); |
2f2dc053 SW |
2305 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
2306 | session->s_state != CEPH_MDS_SESSION_HUNG) { | |
fcff415c YZ |
2307 | if (session->s_state == CEPH_MDS_SESSION_REJECTED) { |
2308 | err = -EACCES; | |
2309 | goto out_session; | |
2310 | } | |
2f2dc053 SW |
2311 | if (session->s_state == CEPH_MDS_SESSION_NEW || |
2312 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
2313 | __open_session(mdsc, session); | |
2314 | list_add(&req->r_wait, &session->s_waiting); | |
2315 | goto out_session; | |
2316 | } | |
2317 | ||
2318 | /* send request */ | |
2f2dc053 SW |
2319 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
2320 | ||
2321 | if (req->r_request_started == 0) /* note request start time */ | |
2322 | req->r_request_started = jiffies; | |
2323 | ||
6e6f0923 | 2324 | err = __prepare_send_request(mdsc, req, mds, false); |
2f2dc053 SW |
2325 | if (!err) { |
2326 | ceph_msg_get(req->r_request); | |
2327 | ceph_con_send(&session->s_con, req->r_request); | |
2328 | } | |
2329 | ||
2330 | out_session: | |
2331 | ceph_put_mds_session(session); | |
48fec5d0 YZ |
2332 | finish: |
2333 | if (err) { | |
2334 | dout("__do_request early error %d\n", err); | |
2335 | req->r_err = err; | |
2336 | complete_request(mdsc, req); | |
2337 | __unregister_request(mdsc, req); | |
2338 | } | |
2f2dc053 SW |
2339 | out: |
2340 | return err; | |
2f2dc053 SW |
2341 | } |
2342 | ||
2343 | /* | |
2344 | * called under mdsc->mutex | |
2345 | */ | |
2346 | static void __wake_requests(struct ceph_mds_client *mdsc, | |
2347 | struct list_head *head) | |
2348 | { | |
ed75ec2c YZ |
2349 | struct ceph_mds_request *req; |
2350 | LIST_HEAD(tmp_list); | |
2351 | ||
2352 | list_splice_init(head, &tmp_list); | |
2f2dc053 | 2353 | |
ed75ec2c YZ |
2354 | while (!list_empty(&tmp_list)) { |
2355 | req = list_entry(tmp_list.next, | |
2356 | struct ceph_mds_request, r_wait); | |
2f2dc053 | 2357 | list_del_init(&req->r_wait); |
7971bd92 | 2358 | dout(" wake request %p tid %llu\n", req, req->r_tid); |
2f2dc053 SW |
2359 | __do_request(mdsc, req); |
2360 | } | |
2361 | } | |
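/*
 * [Illustrative sketch, not part of this file] __wake_requests() (like
 * ceph_send_cap_releases() above) splices the shared list onto a private
 * head before walking it, so the walk stays bounded even if new entries
 * arrive meanwhile. The same idea with a bare singly-linked list:
 */

#include <stddef.h>

struct req { struct req *next; };

static struct req *splice_init(struct req **head)
{
	struct req *all = *head;    /* detach everything in O(1) */
	*head = NULL;
	return all;
}

static void wake_all(struct req **head, void (*fire)(struct req *))
{
	struct req *r = splice_init(head);
	while (r) {
		struct req *next = r->next;
		fire(r);            /* __do_request() analogue */
		r = next;
	}
}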
2362 | ||
2363 | /* | |
2364 | * Wake up threads with requests pending for @mds, so that they can | |
29790f26 | 2365 | * resubmit their requests to a possibly different mds. |
2f2dc053 | 2366 | */ |
29790f26 | 2367 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc053 | 2368 | { |
44ca18f2 | 2369 | struct ceph_mds_request *req; |
282c1052 | 2370 | struct rb_node *p = rb_first(&mdsc->request_tree); |
2f2dc053 SW |
2371 | |
2372 | dout("kick_requests mds%d\n", mds); | |
282c1052 | 2373 | while (p) { |
44ca18f2 | 2374 | req = rb_entry(p, struct ceph_mds_request, r_node); |
282c1052 | 2375 | p = rb_next(p); |
bc2de10d | 2376 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
44ca18f2 | 2377 | continue; |
3de22be6 YZ |
2378 | if (req->r_attempts > 0) |
2379 | continue; /* only new requests */ | |
44ca18f2 SW |
2380 | if (req->r_session && |
2381 | req->r_session->s_mds == mds) { | |
2382 | dout(" kicking tid %llu\n", req->r_tid); | |
03974e81 | 2383 | list_del_init(&req->r_wait); |
44ca18f2 | 2384 | __do_request(mdsc, req); |
2f2dc053 SW |
2385 | } |
2386 | } | |
2387 | } | |
2388 | ||
2389 | void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, | |
2390 | struct ceph_mds_request *req) | |
2391 | { | |
2392 | dout("submit_request on %p\n", req); | |
2393 | mutex_lock(&mdsc->mutex); | |
2394 | __register_request(mdsc, req, NULL); | |
2395 | __do_request(mdsc, req); | |
2396 | mutex_unlock(&mdsc->mutex); | |
2397 | } | |
2398 | ||
2399 | /* | |
2400 | * Synchronously perform an mds request. Take care of all of the
2401 | * session setup, forwarding, retry details. | |
2402 | */ | |
2403 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, | |
2404 | struct inode *dir, | |
2405 | struct ceph_mds_request *req) | |
2406 | { | |
2407 | int err; | |
2408 | ||
2409 | dout("do_request on %p\n", req); | |
2410 | ||
3dd69aab | 2411 | /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ |
2f2dc053 SW |
2412 | if (req->r_inode) |
2413 | ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); | |
3dd69aab JL |
2414 | if (req->r_parent) |
2415 | ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); | |
844d87c3 | 2416 | if (req->r_old_dentry_dir) |
41b02e1f SW |
2417 | ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), |
2418 | CEPH_CAP_PIN); | |
2f2dc053 SW |
2419 | |
2420 | /* issue */ | |
2421 | mutex_lock(&mdsc->mutex); | |
2422 | __register_request(mdsc, req, dir); | |
2423 | __do_request(mdsc, req); | |
2424 | ||
e1518c7c SW |
2425 | if (req->r_err) { |
2426 | err = req->r_err; | |
e1518c7c | 2427 | goto out; |
2f2dc053 SW |
2428 | } |
2429 | ||
e1518c7c SW |
2430 | /* wait */ |
2431 | mutex_unlock(&mdsc->mutex); | |
2432 | dout("do_request waiting\n"); | |
5be73034 | 2433 | if (!req->r_timeout && req->r_wait_for_completion) { |
9280be24 | 2434 | err = req->r_wait_for_completion(mdsc, req); |
e1518c7c | 2435 | } else { |
5be73034 ID |
2436 | long timeleft = wait_for_completion_killable_timeout( |
2437 | &req->r_completion, | |
2438 | ceph_timeout_jiffies(req->r_timeout)); | |
2439 | if (timeleft > 0) | |
2440 | err = 0; | |
2441 | else if (!timeleft) | |
2442 | err = -EIO; /* timed out */ | |
2443 | else | |
2444 | err = timeleft; /* killed */ | |
e1518c7c SW |
2445 | } |
2446 | dout("do_request waited, got %d\n", err); | |
2447 | mutex_lock(&mdsc->mutex); | |
5b1daecd | 2448 | |
e1518c7c | 2449 | /* only abort if we didn't race with a real reply */ |
bc2de10d | 2450 | if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { |
e1518c7c SW |
2451 | err = le32_to_cpu(req->r_reply_info.head->result); |
2452 | } else if (err < 0) { | |
2453 | dout("aborted request %lld with %d\n", req->r_tid, err); | |
b4556396 SW |
2454 | |
2455 | /* | |
2456 | * ensure we aren't running concurrently with | |
2457 | * ceph_fill_trace or ceph_readdir_prepopulate, which | |
2458 | * rely on locks (dir mutex) held by our caller. | |
2459 | */ | |
2460 | mutex_lock(&req->r_fill_mutex); | |
e1518c7c | 2461 | req->r_err = err; |
bc2de10d | 2462 | set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); |
b4556396 | 2463 | mutex_unlock(&req->r_fill_mutex); |
5b1daecd | 2464 | |
3dd69aab | 2465 | if (req->r_parent && |
167c9e35 SW |
2466 | (req->r_op & CEPH_MDS_OP_WRITE)) |
2467 | ceph_invalidate_dir_request(req); | |
2f2dc053 | 2468 | } else { |
e1518c7c | 2469 | err = req->r_err; |
2f2dc053 | 2470 | } |
2f2dc053 | 2471 | |
e1518c7c SW |
2472 | out: |
2473 | mutex_unlock(&mdsc->mutex); | |
2f2dc053 SW |
2474 | dout("do_request %p done, result %d\n", req, err); |
2475 | return err; | |
2476 | } | |
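/*
 * [Illustrative sketch, not part of this file] The wait in
 * ceph_mdsc_do_request() maps the three possible results of
 * wait_for_completion_killable_timeout() onto an error code: leftover
 * time means success, zero means timeout, negative means the waiter was
 * killed. As a standalone helper:
 */

#include <errno.h>

static int map_wait_result(long timeleft)
{
	if (timeleft > 0)
		return 0;              /* completed within the timeout */
	if (timeleft == 0)
		return -EIO;           /* timed out */
	return (int)timeleft;          /* interrupted: e.g. -ERESTARTSYS */
}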
2477 | ||
167c9e35 | 2478 | /* |
2f276c51 | 2479 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
167c9e35 SW |
2480 | * namespace request. |
2481 | */ | |
2482 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) | |
2483 | { | |
3dd69aab | 2484 | struct inode *inode = req->r_parent; |
167c9e35 | 2485 | |
2f276c51 | 2486 | dout("invalidate_dir_request %p (complete, lease(s))\n", inode); |
167c9e35 | 2487 | |
2f276c51 | 2488 | ceph_dir_clear_complete(inode); |
167c9e35 SW |
2489 | if (req->r_dentry) |
2490 | ceph_invalidate_dentry_lease(req->r_dentry); | |
2491 | if (req->r_old_dentry) | |
2492 | ceph_invalidate_dentry_lease(req->r_old_dentry); | |
2493 | } | |
2494 | ||
2f2dc053 SW |
2495 | /* |
2496 | * Handle mds reply. | |
2497 | * | |
2498 | * We take the session mutex and parse and process the reply immediately. | |
2499 | * This preserves the logical ordering of replies, capabilities, etc., sent | |
2500 | * by the MDS as they are applied to our local cache. | |
2501 | */ | |
2502 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |
2503 | { | |
2504 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2505 | struct ceph_mds_request *req; | |
2506 | struct ceph_mds_reply_head *head = msg->front.iov_base; | |
2507 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ | |
982d6011 | 2508 | struct ceph_snap_realm *realm; |
2f2dc053 SW |
2509 | u64 tid; |
2510 | int err, result; | |
2600d2dd | 2511 | int mds = session->s_mds; |
2f2dc053 | 2512 | |
2f2dc053 SW |
2513 | if (msg->front.iov_len < sizeof(*head)) { |
2514 | pr_err("mdsc_handle_reply got corrupt (short) reply\n"); | |
9ec7cab1 | 2515 | ceph_msg_dump(msg); |
2f2dc053 SW |
2516 | return; |
2517 | } | |
2518 | ||
2519 | /* get request, session */ | |
6df058c0 | 2520 | tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 | 2521 | mutex_lock(&mdsc->mutex); |
fcd00b68 | 2522 | req = lookup_get_request(mdsc, tid); |
2f2dc053 SW |
2523 | if (!req) { |
2524 | dout("handle_reply on unknown tid %llu\n", tid); | |
2525 | mutex_unlock(&mdsc->mutex); | |
2526 | return; | |
2527 | } | |
2528 | dout("handle_reply %p\n", req); | |
2f2dc053 SW |
2529 | |
2530 | /* correct session? */ | |
d96d6049 | 2531 | if (req->r_session != session) { |
2f2dc053 SW |
2532 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
2533 | " not mds%d\n", tid, session->s_mds, | |
2534 | req->r_session ? req->r_session->s_mds : -1); | |
2535 | mutex_unlock(&mdsc->mutex); | |
2536 | goto out; | |
2537 | } | |
2538 | ||
2539 | /* dup? */ | |
bc2de10d JL |
2540 | if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || |
2541 | (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { | |
f3ae1b97 | 2542 | pr_warn("got a dup %s reply on %llu from mds%d\n", |
2f2dc053 SW |
2543 | head->safe ? "safe" : "unsafe", tid, mds); |
2544 | mutex_unlock(&mdsc->mutex); | |
2545 | goto out; | |
2546 | } | |
bc2de10d | 2547 | if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { |
f3ae1b97 | 2548 | pr_warn("got unsafe after safe on %llu from mds%d\n", |
85792d0d SW |
2549 | tid, mds); |
2550 | mutex_unlock(&mdsc->mutex); | |
2551 | goto out; | |
2552 | } | |
2f2dc053 SW |
2553 | |
2554 | result = le32_to_cpu(head->result); | |
2555 | ||
2556 | /* | |
e55b71f8 GF |
2557 | * Handle an ESTALE:
2558 | * if we're not talking to the authority, send the request there;
2559 | * if the authority has changed while we weren't looking,
2560 | * resend to the new authority.
2561 | * Otherwise we just have to return an ESTALE.
2f2dc053 SW |
2562 | */ |
2563 | if (result == -ESTALE) { | |
e55b71f8 | 2564 | dout("got ESTALE on request %llu", req->r_tid); |
51da8e8c | 2565 | req->r_resend_mds = -1; |
ca18bede | 2566 | if (req->r_direct_mode != USE_AUTH_MDS) { |
e55b71f8 GF |
2567 | dout("not using auth, setting for that now"); |
2568 | req->r_direct_mode = USE_AUTH_MDS; | |
2f2dc053 SW |
2569 | __do_request(mdsc, req); |
2570 | mutex_unlock(&mdsc->mutex); | |
2571 | goto out; | |
e55b71f8 | 2572 | } else { |
ca18bede YZ |
2573 | int mds = __choose_mds(mdsc, req); |
2574 | if (mds >= 0 && mds != req->r_session->s_mds) { | |
2575 | dout("but auth changed, so resending"); | |
e55b71f8 GF |
2576 | __do_request(mdsc, req); |
2577 | mutex_unlock(&mdsc->mutex); | |
2578 | goto out; | |
2579 | } | |
2f2dc053 | 2580 | } |
e55b71f8 | 2581 | dout("have to return ESTALE on request %llu", req->r_tid); |
2f2dc053 SW |
2582 | } |
2583 | ||
e55b71f8 | 2584 | |
2f2dc053 | 2585 | if (head->safe) { |
bc2de10d | 2586 | set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); |
2f2dc053 | 2587 | __unregister_request(mdsc, req); |
2f2dc053 | 2588 | |
bc2de10d | 2589 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { |
2f2dc053 SW |
2590 | /* |
2591 | * We already handled the unsafe response, now do the | |
2592 | * cleanup. No need to examine the response; the MDS | |
2593 | * doesn't include any result info in the safe | |
2594 | * response. And even if it did, there is nothing | |
2595 | * useful we could do with a revised return value. | |
2596 | */ | |
2597 | dout("got safe reply %llu, mds%d\n", tid, mds); | |
2f2dc053 SW |
2598 | |
2599 | /* last unsafe request during umount? */ | |
44ca18f2 | 2600 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f23 | 2601 | complete_all(&mdsc->safe_umount_waiters); |
2f2dc053 SW |
2602 | mutex_unlock(&mdsc->mutex); |
2603 | goto out; | |
2604 | } | |
e1518c7c | 2605 | } else { |
bc2de10d | 2606 | set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); |
2f2dc053 | 2607 | list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); |
4c06ace8 YZ |
2608 | if (req->r_unsafe_dir) { |
2609 | struct ceph_inode_info *ci = | |
2610 | ceph_inode(req->r_unsafe_dir); | |
2611 | spin_lock(&ci->i_unsafe_lock); | |
2612 | list_add_tail(&req->r_unsafe_dir_item, | |
2613 | &ci->i_unsafe_dirops); | |
2614 | spin_unlock(&ci->i_unsafe_lock); | |
2615 | } | |
2f2dc053 SW |
2616 | } |
2617 | ||
2618 | dout("handle_reply tid %lld result %d\n", tid, result); | |
2619 | rinfo = &req->r_reply_info; | |
14303d20 | 2620 | err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc053 SW |
2621 | mutex_unlock(&mdsc->mutex); |
2622 | ||
2623 | mutex_lock(&session->s_mutex); | |
2624 | if (err < 0) { | |
25933abd | 2625 | pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); |
9ec7cab1 | 2626 | ceph_msg_dump(msg); |
2f2dc053 SW |
2627 | goto out_err; |
2628 | } | |
2629 | ||
2630 | /* snap trace */ | |
982d6011 | 2631 | realm = NULL; |
2f2dc053 SW |
2632 | if (rinfo->snapblob_len) { |
2633 | down_write(&mdsc->snap_rwsem); | |
2634 | ceph_update_snap_trace(mdsc, rinfo->snapblob, | |
982d6011 YZ |
2635 | rinfo->snapblob + rinfo->snapblob_len, |
2636 | le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, | |
2637 | &realm); | |
2f2dc053 SW |
2638 | downgrade_write(&mdsc->snap_rwsem); |
2639 | } else { | |
2640 | down_read(&mdsc->snap_rwsem); | |
2641 | } | |
2642 | ||
2643 | /* insert trace into our cache */ | |
b4556396 | 2644 | mutex_lock(&req->r_fill_mutex); |
315f2408 | 2645 | current->journal_info = req; |
f5a03b08 | 2646 | err = ceph_fill_trace(mdsc->fsc->sb, req); |
2f2dc053 | 2647 | if (err == 0) { |
6e8575fa | 2648 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
81c6aea5 | 2649 | req->r_op == CEPH_MDS_OP_LSSNAP)) |
2f2dc053 | 2650 | ceph_readdir_prepopulate(req, req->r_session); |
37151668 | 2651 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc053 | 2652 | } |
315f2408 | 2653 | current->journal_info = NULL; |
b4556396 | 2654 | mutex_unlock(&req->r_fill_mutex); |
2f2dc053 SW |
2655 | |
2656 | up_read(&mdsc->snap_rwsem); | |
982d6011 YZ |
2657 | if (realm) |
2658 | ceph_put_snap_realm(mdsc, realm); | |
68cd5b4b | 2659 | |
bc2de10d JL |
2660 | if (err == 0 && req->r_target_inode && |
2661 | test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { | |
68cd5b4b YZ |
2662 | struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); |
2663 | spin_lock(&ci->i_unsafe_lock); | |
2664 | list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); | |
2665 | spin_unlock(&ci->i_unsafe_lock); | |
2666 | } | |
2f2dc053 | 2667 | out_err: |
e1518c7c | 2668 | mutex_lock(&mdsc->mutex); |
bc2de10d | 2669 | if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
e1518c7c SW |
2670 | if (err) { |
2671 | req->r_err = err; | |
2672 | } else { | |
5fdb1389 | 2673 | req->r_reply = ceph_msg_get(msg); |
bc2de10d | 2674 | set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); |
e1518c7c | 2675 | } |
2f2dc053 | 2676 | } else { |
e1518c7c | 2677 | dout("reply arrived after request %lld was aborted\n", tid); |
2f2dc053 | 2678 | } |
e1518c7c | 2679 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 2680 | |
2f2dc053 SW |
2681 | mutex_unlock(&session->s_mutex); |
2682 | ||
2683 | /* kick calling process */ | |
2684 | complete_request(mdsc, req); | |
2685 | out: | |
2686 | ceph_mdsc_put_request(req); | |
2687 | return; | |
2688 | } | |
2689 | ||
2690 | ||
2691 | ||
2692 | /* | |
2693 | * handle mds notification that our request has been forwarded. | |
2694 | */ | |
2600d2dd SW |
2695 | static void handle_forward(struct ceph_mds_client *mdsc, |
2696 | struct ceph_mds_session *session, | |
2697 | struct ceph_msg *msg) | |
2f2dc053 SW |
2698 | { |
2699 | struct ceph_mds_request *req; | |
a1ea787c | 2700 | u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2701 | u32 next_mds; |
2702 | u32 fwd_seq; | |
2f2dc053 SW |
2703 | int err = -EINVAL; |
2704 | void *p = msg->front.iov_base; | |
2705 | void *end = p + msg->front.iov_len; | |
2f2dc053 | 2706 | |
a1ea787c | 2707 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea SW |
2708 | next_mds = ceph_decode_32(&p); |
2709 | fwd_seq = ceph_decode_32(&p); | |
2f2dc053 SW |
2710 | |
2711 | mutex_lock(&mdsc->mutex); | |
fcd00b68 | 2712 | req = lookup_get_request(mdsc, tid); |
2f2dc053 | 2713 | if (!req) { |
2a8e5e36 | 2714 | dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); |
2f2dc053 SW |
2715 | goto out; /* dup reply? */ |
2716 | } | |
2717 | ||
bc2de10d | 2718 | if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { |
2a8e5e36 SW |
2719 | dout("forward tid %llu aborted, unregistering\n", tid); |
2720 | __unregister_request(mdsc, req); | |
2721 | } else if (fwd_seq <= req->r_num_fwd) { | |
2722 | dout("forward tid %llu to mds%d - old seq %d <= %d\n", | |
2f2dc053 SW |
2723 | tid, next_mds, req->r_num_fwd, fwd_seq); |
2724 | } else { | |
2725 | /* resend. forward race not possible; mds would drop */ | |
2a8e5e36 SW |
2726 | dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); |
2727 | BUG_ON(req->r_err); | |
bc2de10d | 2728 | BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); |
3de22be6 | 2729 | req->r_attempts = 0; |
2f2dc053 SW |
2730 | req->r_num_fwd = fwd_seq; |
2731 | req->r_resend_mds = next_mds; | |
2732 | put_request_session(req); | |
2733 | __do_request(mdsc, req); | |
2734 | } | |
2735 | ceph_mdsc_put_request(req); | |
2736 | out: | |
2737 | mutex_unlock(&mdsc->mutex); | |
2738 | return; | |
2739 | ||
2740 | bad: | |
2741 | pr_err("mdsc_handle_forward decode error err=%d\n", err); | |
2742 | } | |
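/*
 * [Illustrative sketch, not part of this file] handle_forward() ignores
 * stale notifications by comparing the message's fwd_seq against the
 * count already recorded on the request; only a strictly newer seq
 * triggers a resend. The comparison in isolation (types hypothetical):
 */

#include <stdint.h>
#include <stdbool.h>

struct request { uint32_t num_fwd; int resend_target; };

static bool apply_forward(struct request *req, uint32_t fwd_seq, int next_mds)
{
	if (fwd_seq <= req->num_fwd)
		return false;           /* old/duplicate forward: drop it */
	req->num_fwd = fwd_seq;
	req->resend_target = next_mds;
	return true;                    /* caller resends to next_mds */
}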
2743 | ||
2744 | /* | |
2745 | * handle a mds session control message | |
2746 | */ | |
2747 | static void handle_session(struct ceph_mds_session *session, | |
2748 | struct ceph_msg *msg) | |
2749 | { | |
2750 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2751 | u32 op; | |
2752 | u64 seq; | |
2600d2dd | 2753 | int mds = session->s_mds; |
2f2dc053 SW |
2754 | struct ceph_mds_session_head *h = msg->front.iov_base; |
2755 | int wake = 0; | |
2756 | ||
2f2dc053 SW |
2757 | /* decode */ |
2758 | if (msg->front.iov_len != sizeof(*h)) | |
2759 | goto bad; | |
2760 | op = le32_to_cpu(h->op); | |
2761 | seq = le64_to_cpu(h->seq); | |
2762 | ||
2763 | mutex_lock(&mdsc->mutex); | |
0a07fc8c YZ |
2764 | if (op == CEPH_SESSION_CLOSE) { |
2765 | get_session(session); | |
2600d2dd | 2766 | __unregister_session(mdsc, session); |
0a07fc8c | 2767 | } |
2f2dc053 SW |
2768 | /* FIXME: this ttl calculation is generous */ |
2769 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; | |
2770 | mutex_unlock(&mdsc->mutex); | |
2771 | ||
2772 | mutex_lock(&session->s_mutex); | |
2773 | ||
2774 | dout("handle_session mds%d %s %p state %s seq %llu\n", | |
2775 | mds, ceph_session_op_name(op), session, | |
a687ecaf | 2776 | ceph_session_state_name(session->s_state), seq); |
2f2dc053 SW |
2777 | |
2778 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { | |
2779 | session->s_state = CEPH_MDS_SESSION_OPEN; | |
2780 | pr_info("mds%d came back\n", session->s_mds); | |
2781 | } | |
2782 | ||
2783 | switch (op) { | |
2784 | case CEPH_SESSION_OPEN: | |
29790f26 SW |
2785 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2786 | pr_info("mds%d reconnect success\n", session->s_mds); | |
2f2dc053 SW |
2787 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2788 | renewed_caps(mdsc, session, 0); | |
2789 | wake = 1; | |
2790 | if (mdsc->stopping) | |
2791 | __close_session(mdsc, session); | |
2792 | break; | |
2793 | ||
2794 | case CEPH_SESSION_RENEWCAPS: | |
2795 | if (session->s_renew_seq == seq) | |
2796 | renewed_caps(mdsc, session, 1); | |
2797 | break; | |
2798 | ||
2799 | case CEPH_SESSION_CLOSE: | |
29790f26 SW |
2800 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2801 | pr_info("mds%d reconnect denied\n", session->s_mds); | |
1c841a96 | 2802 | cleanup_session_requests(mdsc, session); |
2f2dc053 | 2803 | remove_session_caps(session); |
656e4382 | 2804 | wake = 2; /* for good measure */ |
f3c60c59 | 2805 | wake_up_all(&mdsc->session_close_wq); |
2f2dc053 SW |
2806 | break; |
2807 | ||
2808 | case CEPH_SESSION_STALE: | |
2809 | pr_info("mds%d caps went stale, renewing\n", | |
2810 | session->s_mds); | |
d8fb02ab | 2811 | spin_lock(&session->s_gen_ttl_lock); |
2f2dc053 | 2812 | session->s_cap_gen++; |
1ce208a6 | 2813 | session->s_cap_ttl = jiffies - 1; |
d8fb02ab | 2814 | spin_unlock(&session->s_gen_ttl_lock); |
2f2dc053 SW |
2815 | send_renew_caps(mdsc, session); |
2816 | break; | |
2817 | ||
2818 | case CEPH_SESSION_RECALL_STATE: | |
2819 | trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); | |
2820 | break; | |
2821 | ||
186e4f7a YZ |
2822 | case CEPH_SESSION_FLUSHMSG: |
2823 | send_flushmsg_ack(mdsc, session, seq); | |
2824 | break; | |
2825 | ||
03f4fcb0 YZ |
2826 | case CEPH_SESSION_FORCE_RO: |
2827 | dout("force_session_readonly %p\n", session); | |
2828 | spin_lock(&session->s_cap_lock); | |
2829 | session->s_readonly = true; | |
2830 | spin_unlock(&session->s_cap_lock); | |
2831 | wake_up_session_caps(session, 0); | |
2832 | break; | |
2833 | ||
fcff415c YZ |
2834 | case CEPH_SESSION_REJECT: |
2835 | WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); | |
2836 | pr_info("mds%d rejected session\n", session->s_mds); | |
2837 | session->s_state = CEPH_MDS_SESSION_REJECTED; | |
2838 | cleanup_session_requests(mdsc, session); | |
2839 | remove_session_caps(session); | |
2840 | wake = 2; /* for good measure */ | |
2841 | break; | |
2842 | ||
2f2dc053 SW |
2843 | default: |
2844 | pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); | |
2845 | WARN_ON(1); | |
2846 | } | |
2847 | ||
2848 | mutex_unlock(&session->s_mutex); | |
2849 | if (wake) { | |
2850 | mutex_lock(&mdsc->mutex); | |
2851 | __wake_requests(mdsc, &session->s_waiting); | |
656e4382 YZ |
2852 | if (wake == 2) |
2853 | kick_requests(mdsc, mds); | |
2f2dc053 SW |
2854 | mutex_unlock(&mdsc->mutex); |
2855 | } | |
0a07fc8c YZ |
2856 | if (op == CEPH_SESSION_CLOSE) |
2857 | ceph_put_mds_session(session); | |
2f2dc053 SW |
2858 | return; |
2859 | ||
2860 | bad: | |
2861 | pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, | |
2862 | (int)msg->front.iov_len); | |
9ec7cab1 | 2863 | ceph_msg_dump(msg); |
2f2dc053 SW |
2864 | return; |
2865 | } | |
2866 | ||
2867 | ||
2868 | /* | |
2869 | * called under session->mutex. | |
2870 | */ | |
2871 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, | |
2872 | struct ceph_mds_session *session) | |
2873 | { | |
2874 | struct ceph_mds_request *req, *nreq; | |
3de22be6 | 2875 | struct rb_node *p; |
2f2dc053 SW |
2876 | int err; |
2877 | ||
2878 | dout("replay_unsafe_requests mds%d\n", session->s_mds); | |
2879 | ||
2880 | mutex_lock(&mdsc->mutex); | |
2881 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { | |
6e6f0923 | 2882 | err = __prepare_send_request(mdsc, req, session->s_mds, true); |
2f2dc053 SW |
2883 | if (!err) { |
2884 | ceph_msg_get(req->r_request); | |
2885 | ceph_con_send(&session->s_con, req->r_request); | |
2886 | } | |
2887 | } | |
3de22be6 YZ |
2888 | |
2889 | /* | |
2890 | * Also re-send old requests when the MDS enters the reconnect stage,
2891 | * so that it can process completed requests in the clientreplay stage.
2892 | */ | |
2893 | p = rb_first(&mdsc->request_tree); | |
2894 | while (p) { | |
2895 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
2896 | p = rb_next(p); | |
bc2de10d | 2897 | if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) |
3de22be6 YZ |
2898 | continue; |
2899 | if (req->r_attempts == 0) | |
2900 | continue; /* only old requests */ | |
2901 | if (req->r_session && | |
2902 | req->r_session->s_mds == session->s_mds) { | |
6e6f0923 YZ |
2903 | err = __prepare_send_request(mdsc, req, |
2904 | session->s_mds, true); | |
3de22be6 YZ |
2905 | if (!err) { |
2906 | ceph_msg_get(req->r_request); | |
2907 | ceph_con_send(&session->s_con, req->r_request); | |
2908 | } | |
2909 | } | |
2910 | } | |
2f2dc053 SW |
2911 | mutex_unlock(&mdsc->mutex); |
2912 | } | |
2913 | ||
2914 | /* | |
2915 | * Encode information about a cap for a reconnect with the MDS. | |
2916 | */ | |
2f2dc053 SW |
2917 | static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, |
2918 | void *arg) | |
2919 | { | |
20cb34ae SW |
2920 | union { |
2921 | struct ceph_mds_cap_reconnect v2; | |
2922 | struct ceph_mds_cap_reconnect_v1 v1; | |
2923 | } rec; | |
b3f8d68f | 2924 | struct ceph_inode_info *ci = cap->ci; |
20cb34ae SW |
2925 | struct ceph_reconnect_state *recon_state = arg; |
2926 | struct ceph_pagelist *pagelist = recon_state->pagelist; | |
2f2dc053 SW |
2927 | char *path; |
2928 | int pathlen, err; | |
2929 | u64 pathbase; | |
3469ed0d | 2930 | u64 snap_follows; |
2f2dc053 SW |
2931 | struct dentry *dentry; |
2932 | ||
2f2dc053 SW |
2933 | dout(" adding %p ino %llx.%llx cap %p %lld %s\n", |
2934 | inode, ceph_vinop(inode), cap, cap->cap_id, | |
2935 | ceph_cap_string(cap->issued)); | |
93cea5be SW |
2936 | err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); |
2937 | if (err) | |
2938 | return err; | |
2f2dc053 SW |
2939 | |
2940 | dentry = d_find_alias(inode); | |
2941 | if (dentry) { | |
2942 | path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); | |
2943 | if (IS_ERR(path)) { | |
2944 | err = PTR_ERR(path); | |
e072f8aa | 2945 | goto out_dput; |
2f2dc053 SW |
2946 | } |
2947 | } else { | |
2948 | path = NULL; | |
2949 | pathlen = 0; | |
4eacd4cb | 2950 | pathbase = 0; |
2f2dc053 | 2951 | } |
2f2dc053 | 2952 | |
be655596 | 2953 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
2954 | cap->seq = 0; /* reset cap seq */ |
2955 | cap->issue_seq = 0; /* and issue_seq */ | |
667ca05c | 2956 | cap->mseq = 0; /* and migrate_seq */ |
99a9c273 | 2957 | cap->cap_gen = cap->session->s_cap_gen; |
20cb34ae | 2958 | |
121f22a1 | 2959 | if (recon_state->msg_version >= 2) { |
20cb34ae SW |
2960 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); |
2961 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2962 | rec.v2.issued = cpu_to_le32(cap->issued); | |
2963 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2964 | rec.v2.pathbase = cpu_to_le64(pathbase); | |
ec1dff25 JL |
2965 | rec.v2.flock_len = (__force __le32) |
2966 | ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1); | |
20cb34ae SW |
2967 | } else { |
2968 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); | |
2969 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2970 | rec.v1.issued = cpu_to_le32(cap->issued); | |
2971 | rec.v1.size = cpu_to_le64(inode->i_size); | |
2972 | ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); | |
2973 | ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); | |
2974 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2975 | rec.v1.pathbase = cpu_to_le64(pathbase); | |
20cb34ae | 2976 | } |
3469ed0d YZ |
2977 | |
2978 | if (list_empty(&ci->i_cap_snaps)) { | |
92776fd2 | 2979 | snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0; |
3469ed0d YZ |
2980 | } else { |
2981 | struct ceph_cap_snap *capsnap = | |
2982 | list_first_entry(&ci->i_cap_snaps, | |
2983 | struct ceph_cap_snap, ci_item); | |
2984 | snap_follows = capsnap->follows; | |
20cb34ae | 2985 | } |
be655596 | 2986 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 | 2987 | |
121f22a1 | 2988 | if (recon_state->msg_version >= 2) { |
40819f6f | 2989 | int num_fcntl_locks, num_flock_locks; |
4deb14a2 | 2990 | struct ceph_filelock *flocks = NULL; |
121f22a1 YZ |
2991 | size_t struct_len, total_len = 0; |
2992 | u8 struct_v = 0; | |
39be95e9 JS |
2993 | |
2994 | encode_again: | |
b3f8d68f YZ |
2995 | if (rec.v2.flock_len) { |
2996 | ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); | |
2997 | } else { | |
2998 | num_fcntl_locks = 0; | |
2999 | num_flock_locks = 0; | |
3000 | } | |
4deb14a2 YZ |
3001 | if (num_fcntl_locks + num_flock_locks > 0) { |
3002 | flocks = kmalloc((num_fcntl_locks + num_flock_locks) * | |
3003 | sizeof(struct ceph_filelock), GFP_NOFS); | |
3004 | if (!flocks) { | |
3005 | err = -ENOMEM; | |
3006 | goto out_free; | |
3007 | } | |
3008 | err = ceph_encode_locks_to_buffer(inode, flocks, | |
3009 | num_fcntl_locks, | |
3010 | num_flock_locks); | |
3011 | if (err) { | |
3012 | kfree(flocks); | |
3013 | flocks = NULL; | |
3014 | if (err == -ENOSPC) | |
3015 | goto encode_again; | |
3016 | goto out_free; | |
3017 | } | |
3018 | } else { | |
39be95e9 | 3019 | kfree(flocks); |
4deb14a2 | 3020 | flocks = NULL; |
39be95e9 | 3021 | } |
121f22a1 YZ |
3022 | |
3023 | if (recon_state->msg_version >= 3) { | |
3024 | /* version, compat_version and struct_len */ | |
3025 | total_len = 2 * sizeof(u8) + sizeof(u32); | |
3469ed0d | 3026 | struct_v = 2; |
121f22a1 | 3027 | } |
39be95e9 JS |
3028 | /* |
3029 | * number of encoded locks is stable, so copy to pagelist | |
3030 | */ | |
121f22a1 YZ |
3031 | struct_len = 2 * sizeof(u32) + |
3032 | (num_fcntl_locks + num_flock_locks) * | |
3033 | sizeof(struct ceph_filelock); | |
3034 | rec.v2.flock_len = cpu_to_le32(struct_len); | |
3035 | ||
3036 | struct_len += sizeof(rec.v2); | |
3037 | struct_len += sizeof(u32) + pathlen; | |
3038 | ||
3469ed0d YZ |
3039 | if (struct_v >= 2) |
3040 | struct_len += sizeof(u64); /* snap_follows */ | |
3041 | ||
121f22a1 YZ |
3042 | total_len += struct_len; |
3043 | err = ceph_pagelist_reserve(pagelist, total_len); | |
3044 | ||
3045 | if (!err) { | |
3046 | if (recon_state->msg_version >= 3) { | |
3047 | ceph_pagelist_encode_8(pagelist, struct_v); | |
3048 | ceph_pagelist_encode_8(pagelist, 1); | |
3049 | ceph_pagelist_encode_32(pagelist, struct_len); | |
3050 | } | |
3051 | ceph_pagelist_encode_string(pagelist, path, pathlen); | |
3052 | ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); | |
3053 | ceph_locks_to_pagelist(flocks, pagelist, | |
3054 | num_fcntl_locks, | |
3055 | num_flock_locks); | |
3469ed0d YZ |
3056 | if (struct_v >= 2) |
3057 | ceph_pagelist_encode_64(pagelist, snap_follows); | |
121f22a1 | 3058 | } |
39be95e9 | 3059 | kfree(flocks); |
3612abbd | 3060 | } else { |
121f22a1 YZ |
3061 | size_t size = sizeof(u32) + pathlen + sizeof(rec.v1); |
3062 | err = ceph_pagelist_reserve(pagelist, size); | |
3063 | if (!err) { | |
3064 | ceph_pagelist_encode_string(pagelist, path, pathlen); | |
3065 | ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); | |
3066 | } | |
40819f6f | 3067 | } |
44c99757 YZ |
3068 | |
3069 | recon_state->nr_caps++; | |
e072f8aa | 3070 | out_free: |
2f2dc053 | 3071 | kfree(path); |
e072f8aa | 3072 | out_dput: |
2f2dc053 | 3073 | dput(dentry); |
93cea5be | 3074 | return err; |
2f2dc053 SW |
3075 | } |
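/*
 * The v3 record length bookkeeping above, restated as one computation.
 * A sketch that assumes the ceph wire structs are in scope; the
 * function name is illustrative. Encoding order on the pagelist is:
 * (struct_v, compat_version, struct_len) header, path string, rec.v2,
 * the two lock counts plus the lock entries, then snap_follows.
 */
static size_t v3_cap_record_len_sketch(size_t pathlen, int nr_locks)
{
	/* two u32 lock counts plus the entries; this is also the
	 * value carried in rec.v2.flock_len */
	size_t locks = 2 * sizeof(u32) +
		       nr_locks * sizeof(struct ceph_filelock);
	size_t struct_len = locks;

	struct_len += sizeof(struct ceph_mds_cap_reconnect); /* rec.v2 */
	struct_len += sizeof(u32) + pathlen;	/* path string */
	struct_len += sizeof(u64);		/* snap_follows, v >= 2 */

	/* plus the struct_v, compat_version and struct_len header */
	return 2 * sizeof(u8) + sizeof(u32) + struct_len;
}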
3076 | ||
3077 | ||
3078 | /* | |
3079 | * If an MDS fails and recovers, clients need to reconnect in order to | |
3080 | * reestablish shared state. This includes all caps issued through | |
3081 | * this session _and_ the snap_realm hierarchy. Because it's not | |
3082 | * clear which snap realms the mds cares about, we send everything we | |
3083 | * know about. That ensures we'll then get any new info the | |
3084 | * recovering MDS might have. | |
3085 | * | |
3086 | * This is a relatively heavyweight operation, but it's rare. | |
3087 | * | |
3088 | * called with mdsc->mutex held. | |
3089 | */ | |
34b6c855 SW |
3090 | static void send_mds_reconnect(struct ceph_mds_client *mdsc, |
3091 | struct ceph_mds_session *session) | |
2f2dc053 | 3092 | { |
2f2dc053 | 3093 | struct ceph_msg *reply; |
a105f00c | 3094 | struct rb_node *p; |
34b6c855 | 3095 | int mds = session->s_mds; |
9abf82b8 | 3096 | int err = -ENOMEM; |
44c99757 | 3097 | int s_nr_caps; |
93cea5be | 3098 | struct ceph_pagelist *pagelist; |
20cb34ae | 3099 | struct ceph_reconnect_state recon_state; |
c8a96a31 | 3100 | LIST_HEAD(dispose); |
2f2dc053 | 3101 | |
34b6c855 | 3102 | pr_info("mds%d reconnect start\n", mds); |
2f2dc053 | 3103 | |
93cea5be SW |
3104 | pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); |
3105 | if (!pagelist) | |
3106 | goto fail_nopagelist; | |
3107 | ceph_pagelist_init(pagelist); | |
3108 | ||
b61c2763 | 3109 | reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); |
a79832f2 | 3110 | if (!reply) |
93cea5be | 3111 | goto fail_nomsg; |
93cea5be | 3112 | |
34b6c855 SW |
3113 | mutex_lock(&session->s_mutex); |
3114 | session->s_state = CEPH_MDS_SESSION_RECONNECTING; | |
3115 | session->s_seq = 0; | |
2f2dc053 | 3116 | |
2f2dc053 | 3117 | dout("session %p state %s\n", session, |
a687ecaf | 3118 | ceph_session_state_name(session->s_state)); |
2f2dc053 | 3119 | |
99a9c273 YZ |
3120 | spin_lock(&session->s_gen_ttl_lock); |
3121 | session->s_cap_gen++; | |
3122 | spin_unlock(&session->s_gen_ttl_lock); | |
3123 | ||
3124 | spin_lock(&session->s_cap_lock); | |
03f4fcb0 YZ |
3125 | /* don't know if session is readonly */ |
3126 | session->s_readonly = 0; | |
99a9c273 YZ |
3127 | /* |
3128 | * notify __ceph_remove_cap() that we are composing cap reconnect. | |
3129 | * If a cap gets released before being added to the cap reconnect, | |
3130 | * __ceph_remove_cap() should skip queuing cap release. | |
3131 | */ | |
3132 | session->s_cap_reconnect = 1; | |
e01a5946 | 3133 | /* drop old cap expires; we're about to reestablish that state */ |
c8a96a31 JL |
3134 | detach_cap_releases(session, &dispose); |
3135 | spin_unlock(&session->s_cap_lock); | |
3136 | dispose_cap_releases(mdsc, &dispose); | |
e01a5946 | 3137 | |
5d23371f | 3138 | /* trim unused caps to reduce MDS's cache rejoin time */ |
c0bd50e2 YZ |
3139 | if (mdsc->fsc->sb->s_root) |
3140 | shrink_dcache_parent(mdsc->fsc->sb->s_root); | |
5d23371f YZ |
3141 | |
3142 | ceph_con_close(&session->s_con); | |
3143 | ceph_con_open(&session->s_con, | |
3144 | CEPH_ENTITY_TYPE_MDS, mds, | |
3145 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
3146 | ||
3147 | /* replay unsafe requests */ | |
3148 | replay_unsafe_requests(mdsc, session); | |
3149 | ||
3150 | down_read(&mdsc->snap_rwsem); | |
3151 | ||
2f2dc053 | 3152 | /* traverse this session's caps */ |
44c99757 YZ |
3153 | s_nr_caps = session->s_nr_caps; |
3154 | err = ceph_pagelist_encode_32(pagelist, s_nr_caps); | |
93cea5be SW |
3155 | if (err) |
3156 | goto fail; | |
20cb34ae | 3157 | |
44c99757 | 3158 | recon_state.nr_caps = 0; |
20cb34ae | 3159 | recon_state.pagelist = pagelist; |
121f22a1 YZ |
3160 | if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) |
3161 | recon_state.msg_version = 3; | |
3162 | else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK) | |
3163 | recon_state.msg_version = 2; | |
3164 | else | |
3165 | recon_state.msg_version = 1; | |
20cb34ae | 3166 | err = iterate_session_caps(session, encode_caps_cb, &recon_state); |
2f2dc053 | 3167 | if (err < 0) |
9abf82b8 | 3168 | goto fail; |
2f2dc053 | 3169 | |
99a9c273 YZ |
3170 | spin_lock(&session->s_cap_lock); |
3171 | session->s_cap_reconnect = 0; | |
3172 | spin_unlock(&session->s_cap_lock); | |
3173 | ||
2f2dc053 SW |
3174 | /* |
3175 | * Snaprealms: we provide the mds with the ino, seq (version), and | |
3176 | * parent for all of our realms. If the mds has any newer info, | |
3177 | * it will tell us. | |
3178 | */ | |
a105f00c SW |
3179 | for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { |
3180 | struct ceph_snap_realm *realm = | |
3181 | rb_entry(p, struct ceph_snap_realm, node); | |
93cea5be | 3182 | struct ceph_mds_snaprealm_reconnect sr_rec; |
2f2dc053 SW |
3183 | |
3184 | dout(" adding snap realm %llx seq %lld parent %llx\n", | |
3185 | realm->ino, realm->seq, realm->parent_ino); | |
93cea5be SW |
3186 | sr_rec.ino = cpu_to_le64(realm->ino); |
3187 | sr_rec.seq = cpu_to_le64(realm->seq); | |
3188 | sr_rec.parent = cpu_to_le64(realm->parent_ino); | |
3189 | err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); | |
3190 | if (err) | |
3191 | goto fail; | |
2f2dc053 | 3192 | } |
2f2dc053 | 3193 | |
121f22a1 | 3194 | reply->hdr.version = cpu_to_le16(recon_state.msg_version); |
44c99757 YZ |
3195 | |
3196 | /* raced with cap release? */ | |
3197 | if (s_nr_caps != recon_state.nr_caps) { | |
3198 | struct page *page = list_first_entry(&pagelist->head, | |
3199 | struct page, lru); | |
3200 | __le32 *addr = kmap_atomic(page); | |
3201 | *addr = cpu_to_le32(recon_state.nr_caps); | |
3202 | kunmap_atomic(addr); | |
ebf18f47 | 3203 | } |
44c99757 YZ |
3204 | |
3205 | reply->hdr.data_len = cpu_to_le32(pagelist->length); | |
3206 | ceph_msg_data_add_pagelist(reply, pagelist); | |
e548e9b9 YZ |
3207 | |
3208 | ceph_early_kick_flushing_caps(mdsc, session); | |
3209 | ||
2f2dc053 SW |
3210 | ceph_con_send(&session->s_con, reply); |
3211 | ||
9abf82b8 SW |
3212 | mutex_unlock(&session->s_mutex); |
3213 | ||
3214 | mutex_lock(&mdsc->mutex); | |
3215 | __wake_requests(mdsc, &session->s_waiting); | |
3216 | mutex_unlock(&mdsc->mutex); | |
3217 | ||
2f2dc053 | 3218 | up_read(&mdsc->snap_rwsem); |
2f2dc053 SW |
3219 | return; |
3220 | ||
93cea5be | 3221 | fail: |
2f2dc053 | 3222 | ceph_msg_put(reply); |
9abf82b8 SW |
3223 | up_read(&mdsc->snap_rwsem); |
3224 | mutex_unlock(&session->s_mutex); | |
93cea5be SW |
3225 | fail_nomsg: |
3226 | ceph_pagelist_release(pagelist); | |
93cea5be | 3227 | fail_nopagelist: |
9abf82b8 | 3228 | pr_err("error %d preparing reconnect for mds%d\n", err, mds); |
9abf82b8 | 3229 | return; |
2f2dc053 SW |
3230 | } |
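/*
 * Shape of the reconnect payload assembled above, as a sketch:
 *
 *	u32 nr_caps				(leading count)
 *	nr_caps cap records			(from encode_caps_cb)
 *	one struct ceph_mds_snaprealm_reconnect	(per snap realm)
 *
 * The count is written first from s_nr_caps; if a cap release races
 * with the traversal, the first four bytes of the first pagelist page
 * are patched in place afterwards, essentially:
 */
static void patch_cap_count_sketch(struct page *first_page, int nr_caps)
{
	__le32 *addr = kmap_atomic(first_page);	/* count at offset 0 */

	*addr = cpu_to_le32(nr_caps);
	kunmap_atomic(addr);
}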
3231 | ||
3232 | ||
3233 | /* | |
3234 | * compare old and new mdsmaps, kicking requests | |
3235 | * and closing out old connections as necessary | |
3236 | * | |
3237 | * called under mdsc->mutex. | |
3238 | */ | |
3239 | static void check_new_map(struct ceph_mds_client *mdsc, | |
3240 | struct ceph_mdsmap *newmap, | |
3241 | struct ceph_mdsmap *oldmap) | |
3242 | { | |
3243 | int i; | |
3244 | int oldstate, newstate; | |
3245 | struct ceph_mds_session *s; | |
3246 | ||
3247 | dout("check_new_map new %u old %u\n", | |
3248 | newmap->m_epoch, oldmap->m_epoch); | |
3249 | ||
76201b63 | 3250 | for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) { |
d37b1d99 | 3251 | if (!mdsc->sessions[i]) |
2f2dc053 SW |
3252 | continue; |
3253 | s = mdsc->sessions[i]; | |
3254 | oldstate = ceph_mdsmap_get_state(oldmap, i); | |
3255 | newstate = ceph_mdsmap_get_state(newmap, i); | |
3256 | ||
0deb01c9 | 3257 | dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", |
2f2dc053 | 3258 | i, ceph_mds_state_name(oldstate), |
0deb01c9 | 3259 | ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", |
2f2dc053 | 3260 | ceph_mds_state_name(newstate), |
0deb01c9 | 3261 | ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", |
a687ecaf | 3262 | ceph_session_state_name(s->s_state)); |
2f2dc053 | 3263 | |
76201b63 | 3264 | if (i >= newmap->m_num_mds || |
3e8f43a0 | 3265 | memcmp(ceph_mdsmap_get_addr(oldmap, i), |
2f2dc053 SW |
3266 | ceph_mdsmap_get_addr(newmap, i), |
3267 | sizeof(struct ceph_entity_addr))) { | |
3268 | if (s->s_state == CEPH_MDS_SESSION_OPENING) { | |
3269 | /* the session never opened, just close it | |
3270 | * out now */ | |
0a07fc8c | 3271 | get_session(s); |
2600d2dd | 3272 | __unregister_session(mdsc, s); |
2f2dc053 | 3273 | __wake_requests(mdsc, &s->s_waiting); |
0a07fc8c | 3274 | ceph_put_mds_session(s); |
2827528d YZ |
3275 | } else if (i >= newmap->m_num_mds) { |
3276 | /* force close session for stopped mds */ | |
3277 | get_session(s); | |
2600d2dd | 3278 | __unregister_session(mdsc, s); |
2827528d YZ |
3279 | __wake_requests(mdsc, &s->s_waiting); |
3280 | kick_requests(mdsc, i); | |
3281 | mutex_unlock(&mdsc->mutex); | |
3282 | ||
3283 | mutex_lock(&s->s_mutex); | |
3284 | cleanup_session_requests(mdsc, s); | |
3285 | remove_session_caps(s); | |
3286 | mutex_unlock(&s->s_mutex); | |
3287 | ||
3288 | ceph_put_mds_session(s); | |
3289 | ||
3290 | mutex_lock(&mdsc->mutex); | |
2f2dc053 SW |
3291 | } else { |
3292 | /* just close it */ | |
3293 | mutex_unlock(&mdsc->mutex); | |
3294 | mutex_lock(&s->s_mutex); | |
3295 | mutex_lock(&mdsc->mutex); | |
3296 | ceph_con_close(&s->s_con); | |
3297 | mutex_unlock(&s->s_mutex); | |
3298 | s->s_state = CEPH_MDS_SESSION_RESTARTING; | |
3299 | } | |
2f2dc053 SW |
3300 | } else if (oldstate == newstate) { |
3301 | continue; /* nothing new with this mds */ | |
3302 | } | |
3303 | ||
3304 | /* | |
3305 | * send reconnect? | |
3306 | */ | |
3307 | if (s->s_state == CEPH_MDS_SESSION_RESTARTING && | |
34b6c855 SW |
3308 | newstate >= CEPH_MDS_STATE_RECONNECT) { |
3309 | mutex_unlock(&mdsc->mutex); | |
3310 | send_mds_reconnect(mdsc, s); | |
3311 | mutex_lock(&mdsc->mutex); | |
3312 | } | |
2f2dc053 SW |
3313 | |
3314 | /* | |
29790f26 | 3315 | * kick requests on any mds that has gone active. |
2f2dc053 SW |
3316 | */ |
3317 | if (oldstate < CEPH_MDS_STATE_ACTIVE && | |
3318 | newstate >= CEPH_MDS_STATE_ACTIVE) { | |
29790f26 SW |
3319 | if (oldstate != CEPH_MDS_STATE_CREATING && |
3320 | oldstate != CEPH_MDS_STATE_STARTING) | |
3321 | pr_info("mds%d recovery completed\n", s->s_mds); | |
3322 | kick_requests(mdsc, i); | |
2f2dc053 | 3323 | ceph_kick_flushing_caps(mdsc, s); |
0dc2570f | 3324 | wake_up_session_caps(s, 1); |
2f2dc053 SW |
3325 | } |
3326 | } | |
cb170a22 | 3327 | |
76201b63 | 3328 | for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) { |
cb170a22 SW |
3329 | s = mdsc->sessions[i]; |
3330 | if (!s) | |
3331 | continue; | |
3332 | if (!ceph_mdsmap_is_laggy(newmap, i)) | |
3333 | continue; | |
3334 | if (s->s_state == CEPH_MDS_SESSION_OPEN || | |
3335 | s->s_state == CEPH_MDS_SESSION_HUNG || | |
3336 | s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3337 | dout(" connecting to export targets of laggy mds%d\n", | |
3338 | i); | |
3339 | __open_export_target_sessions(mdsc, s); | |
3340 | } | |
3341 | } | |
2f2dc053 SW |
3342 | } |
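/*
 * The "went active" tests above, isolated as predicates; a sketch
 * with illustrative names. Any mds crossing into the active state
 * gets its requests kicked, but the recovery notice is printed only
 * when the old state was not initial creation or startup.
 */
static bool mds_went_active_sketch(int oldstate, int newstate)
{
	return oldstate < CEPH_MDS_STATE_ACTIVE &&
	       newstate >= CEPH_MDS_STATE_ACTIVE;
}

static bool mds_recovery_notice_sketch(int oldstate, int newstate)
{
	return mds_went_active_sketch(oldstate, newstate) &&
	       oldstate != CEPH_MDS_STATE_CREATING &&
	       oldstate != CEPH_MDS_STATE_STARTING;
}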
3343 | ||
3344 | ||
3345 | ||
3346 | /* | |
3347 | * leases | |
3348 | */ | |
3349 | ||
3350 | /* | |
3351 | * caller must hold session s_mutex, dentry->d_lock | |
3352 | */ | |
3353 | void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) | |
3354 | { | |
3355 | struct ceph_dentry_info *di = ceph_dentry(dentry); | |
3356 | ||
3357 | ceph_put_mds_session(di->lease_session); | |
3358 | di->lease_session = NULL; | |
3359 | } | |
3360 | ||
2600d2dd SW |
3361 | static void handle_lease(struct ceph_mds_client *mdsc, |
3362 | struct ceph_mds_session *session, | |
3363 | struct ceph_msg *msg) | |
2f2dc053 | 3364 | { |
3d14c5d2 | 3365 | struct super_block *sb = mdsc->fsc->sb; |
2f2dc053 | 3366 | struct inode *inode; |
2f2dc053 SW |
3367 | struct dentry *parent, *dentry; |
3368 | struct ceph_dentry_info *di; | |
2600d2dd | 3369 | int mds = session->s_mds; |
2f2dc053 | 3370 | struct ceph_mds_lease *h = msg->front.iov_base; |
1e5ea23d | 3371 | u32 seq; |
2f2dc053 | 3372 | struct ceph_vino vino; |
2f2dc053 SW |
3373 | struct qstr dname; |
3374 | int release = 0; | |
3375 | ||
2f2dc053 SW |
3376 | dout("handle_lease from mds%d\n", mds); |
3377 | ||
3378 | /* decode */ | |
3379 | if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) | |
3380 | goto bad; | |
3381 | vino.ino = le64_to_cpu(h->ino); | |
3382 | vino.snap = CEPH_NOSNAP; | |
1e5ea23d | 3383 | seq = le32_to_cpu(h->seq); |
2f2dc053 SW |
3384 | dname.name = (void *)h + sizeof(*h) + sizeof(u32); |
3385 | dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); | |
3386 | if (dname.len != get_unaligned_le32(h+1)) | |
3387 | goto bad; | |
3388 | ||
2f2dc053 SW |
3389 | /* lookup inode */ |
3390 | inode = ceph_find_inode(sb, vino); | |
2f90b852 SW |
3391 | dout("handle_lease %s, ino %llx %p %.*s\n", |
3392 | ceph_lease_op_name(h->action), vino.ino, inode, | |
1e5ea23d | 3393 | dname.len, dname.name); |
6cd3bcad YZ |
3394 | |
3395 | mutex_lock(&session->s_mutex); | |
3396 | session->s_seq++; | |
3397 | ||
d37b1d99 | 3398 | if (!inode) { |
2f2dc053 SW |
3399 | dout("handle_lease no inode %llx\n", vino.ino); |
3400 | goto release; | |
3401 | } | |
2f2dc053 SW |
3402 | |
3403 | /* dentry */ | |
3404 | parent = d_find_alias(inode); | |
3405 | if (!parent) { | |
3406 | dout("no parent dentry on inode %p\n", inode); | |
3407 | WARN_ON(1); | |
3408 | goto release; /* hrm... */ | |
3409 | } | |
8387ff25 | 3410 | dname.hash = full_name_hash(parent, dname.name, dname.len); |
2f2dc053 SW |
3411 | dentry = d_lookup(parent, &dname); |
3412 | dput(parent); | |
3413 | if (!dentry) | |
3414 | goto release; | |
3415 | ||
3416 | spin_lock(&dentry->d_lock); | |
3417 | di = ceph_dentry(dentry); | |
3418 | switch (h->action) { | |
3419 | case CEPH_MDS_LEASE_REVOKE: | |
3d8eb7a9 | 3420 | if (di->lease_session == session) { |
1e5ea23d SW |
3421 | if (ceph_seq_cmp(di->lease_seq, seq) > 0) |
3422 | h->seq = cpu_to_le32(di->lease_seq); | |
2f2dc053 SW |
3423 | __ceph_mdsc_drop_dentry_lease(dentry); |
3424 | } | |
3425 | release = 1; | |
3426 | break; | |
3427 | ||
3428 | case CEPH_MDS_LEASE_RENEW: | |
3d8eb7a9 | 3429 | if (di->lease_session == session && |
2f2dc053 SW |
3430 | di->lease_gen == session->s_cap_gen && |
3431 | di->lease_renew_from && | |
3432 | di->lease_renew_after == 0) { | |
3433 | unsigned long duration = | |
3563dbdd | 3434 | msecs_to_jiffies(le32_to_cpu(h->duration_ms)); |
2f2dc053 | 3435 | |
1e5ea23d | 3436 | di->lease_seq = seq; |
9b16f03c | 3437 | di->time = di->lease_renew_from + duration; |
2f2dc053 SW |
3438 | di->lease_renew_after = di->lease_renew_from + |
3439 | (duration >> 1); | |
3440 | di->lease_renew_from = 0; | |
3441 | } | |
3442 | break; | |
3443 | } | |
3444 | spin_unlock(&dentry->d_lock); | |
3445 | dput(dentry); | |
3446 | ||
3447 | if (!release) | |
3448 | goto out; | |
3449 | ||
3450 | release: | |
3451 | /* let's just reuse the same message */ | |
3452 | h->action = CEPH_MDS_LEASE_REVOKE_ACK; | |
3453 | ceph_msg_get(msg); | |
3454 | ceph_con_send(&session->s_con, msg); | |
3455 | ||
3456 | out: | |
3457 | iput(inode); | |
3458 | mutex_unlock(&session->s_mutex); | |
2f2dc053 SW |
3459 | return; |
3460 | ||
3461 | bad: | |
3462 | pr_err("corrupt lease message\n"); | |
9ec7cab1 | 3463 | ceph_msg_dump(msg); |
2f2dc053 SW |
3464 | } |
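/*
 * The renewal window arithmetic applied in the RENEW case above, in
 * one place (a sketch; parameter names are illustrative): a lease is
 * valid for its full duration from lease_renew_from and becomes a
 * renewal candidate halfway through that window.
 */
static void lease_window_sketch(unsigned long renew_from,
				unsigned long duration,	/* in jiffies */
				unsigned long *valid_until,
				unsigned long *renew_after)
{
	*valid_until = renew_from + duration;		/* di->time */
	*renew_after = renew_from + (duration >> 1);	/* half-life */
}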
3465 | ||
3466 | void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, | |
3467 | struct inode *inode, | |
3468 | struct dentry *dentry, char action, | |
3469 | u32 seq) | |
3470 | { | |
3471 | struct ceph_msg *msg; | |
3472 | struct ceph_mds_lease *lease; | |
3473 | int len = sizeof(*lease) + sizeof(u32); | |
3474 | int dnamelen = 0; | |
3475 | ||
3476 | dout("lease_send_msg inode %p dentry %p %s to mds%d\n", | |
3477 | inode, dentry, ceph_lease_op_name(action), session->s_mds); | |
3478 | dnamelen = dentry->d_name.len; | |
3479 | len += dnamelen; | |
3480 | ||
b61c2763 | 3481 | msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); |
a79832f2 | 3482 | if (!msg) |
2f2dc053 SW |
3483 | return; |
3484 | lease = msg->front.iov_base; | |
3485 | lease->action = action; | |
2f2dc053 SW |
3486 | lease->ino = cpu_to_le64(ceph_vino(inode).ino); |
3487 | lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); | |
3488 | lease->seq = cpu_to_le32(seq); | |
3489 | put_unaligned_le32(dnamelen, lease + 1); | |
3490 | memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); | |
3491 | ||
3492 | /* | |
3493 | * if this is a preemptive lease RELEASE, no need to | |
3494 | * flush request stream, since the actual request will | |
3495 | * soon follow. | |
3496 | */ | |
3497 | msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); | |
3498 | ||
3499 | ceph_con_send(&session->s_con, msg); | |
3500 | } | |
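/*
 * Front layout of the CEPH_MSG_CLIENT_LEASE message built above, as a
 * sketch: the fixed lease header, a u32 name length, then the raw
 * dentry name bytes. The helper name is illustrative.
 */
static int lease_msg_front_len_sketch(int dnamelen)
{
	return sizeof(struct ceph_mds_lease) + sizeof(u32) + dnamelen;
}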
3501 | ||
2f2dc053 SW |
3502 | /* |
3503 | * drop all leases (and dentry refs) in preparation for umount | |
3504 | */ | |
3505 | static void drop_leases(struct ceph_mds_client *mdsc) | |
3506 | { | |
3507 | int i; | |
3508 | ||
3509 | dout("drop_leases\n"); | |
3510 | mutex_lock(&mdsc->mutex); | |
3511 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3512 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
3513 | if (!s) | |
3514 | continue; | |
3515 | mutex_unlock(&mdsc->mutex); | |
3516 | mutex_lock(&s->s_mutex); | |
3517 | mutex_unlock(&s->s_mutex); | |
3518 | ceph_put_mds_session(s); | |
3519 | mutex_lock(&mdsc->mutex); | |
3520 | } | |
3521 | mutex_unlock(&mdsc->mutex); | |
3522 | } | |
3523 | ||
3524 | ||
3525 | ||
3526 | /* | |
3527 | * delayed work -- periodically trim expired leases, renew caps with mds | |
3528 | */ | |
3529 | static void schedule_delayed(struct ceph_mds_client *mdsc) | |
3530 | { | |
3531 | int delay = 5; | |
3532 | unsigned hz = round_jiffies_relative(HZ * delay); | |
3533 | schedule_delayed_work(&mdsc->delayed_work, hz); | |
3534 | } | |
3535 | ||
3536 | static void delayed_work(struct work_struct *work) | |
3537 | { | |
3538 | int i; | |
3539 | struct ceph_mds_client *mdsc = | |
3540 | container_of(work, struct ceph_mds_client, delayed_work.work); | |
3541 | int renew_interval; | |
3542 | int renew_caps; | |
3543 | ||
3544 | dout("mdsc delayed_work\n"); | |
afcdaea3 | 3545 | ceph_check_delayed_caps(mdsc); |
2f2dc053 SW |
3546 | |
3547 | mutex_lock(&mdsc->mutex); | |
3548 | renew_interval = mdsc->mdsmap->m_session_timeout >> 2; | |
3549 | renew_caps = time_after_eq(jiffies, HZ*renew_interval + | |
3550 | mdsc->last_renew_caps); | |
3551 | if (renew_caps) | |
3552 | mdsc->last_renew_caps = jiffies; | |
3553 | ||
3554 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3555 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
d37b1d99 | 3556 | if (!s) |
2f2dc053 SW |
3557 | continue; |
3558 | if (s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3559 | dout("resending session close request for mds%d\n", | |
3560 | s->s_mds); | |
3561 | request_close_session(mdsc, s); | |
3562 | ceph_put_mds_session(s); | |
3563 | continue; | |
3564 | } | |
3565 | if (s->s_ttl && time_after(jiffies, s->s_ttl)) { | |
3566 | if (s->s_state == CEPH_MDS_SESSION_OPEN) { | |
3567 | s->s_state = CEPH_MDS_SESSION_HUNG; | |
3568 | pr_info("mds%d hung\n", s->s_mds); | |
3569 | } | |
3570 | } | |
3571 | if (s->s_state < CEPH_MDS_SESSION_OPEN) { | |
3572 | /* this mds has failed or is recovering, just wait */ | |
3573 | ceph_put_mds_session(s); | |
3574 | continue; | |
3575 | } | |
3576 | mutex_unlock(&mdsc->mutex); | |
3577 | ||
3578 | mutex_lock(&s->s_mutex); | |
3579 | if (renew_caps) | |
3580 | send_renew_caps(mdsc, s); | |
3581 | else | |
3582 | ceph_con_keepalive(&s->s_con); | |
aab53dd9 SW |
3583 | if (s->s_state == CEPH_MDS_SESSION_OPEN || |
3584 | s->s_state == CEPH_MDS_SESSION_HUNG) | |
3d7ded4d | 3585 | ceph_send_cap_releases(mdsc, s); |
2f2dc053 SW |
3586 | mutex_unlock(&s->s_mutex); |
3587 | ceph_put_mds_session(s); | |
3588 | ||
3589 | mutex_lock(&mdsc->mutex); | |
3590 | } | |
3591 | mutex_unlock(&mdsc->mutex); | |
3592 | ||
3593 | schedule_delayed(mdsc); | |
3594 | } | |
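/*
 * The cap-renewal cadence above, reduced to its test; a sketch with
 * illustrative names. Assuming the common 60 second MDS session
 * timeout, renew_interval is 15 seconds, so renewal fires on roughly
 * every third 5 second tick of the delayed work.
 */
static bool should_renew_caps_sketch(unsigned long now_jiffies,
				     unsigned long last_renew,
				     unsigned long session_timeout_sec)
{
	unsigned long renew_interval = session_timeout_sec >> 2;

	return time_after_eq(now_jiffies,
			     last_renew + renew_interval * HZ);
}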
3595 | ||
3d14c5d2 | 3596 | int ceph_mdsc_init(struct ceph_fs_client *fsc) |
2f2dc053 | 3597 | |
2f2dc053 | 3598 | { |
3d14c5d2 YS |
3599 | struct ceph_mds_client *mdsc; |
3600 | ||
3601 | mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); | |
3602 | if (!mdsc) | |
3603 | return -ENOMEM; | |
3604 | mdsc->fsc = fsc; | |
3605 | fsc->mdsc = mdsc; | |
2f2dc053 SW |
3606 | mutex_init(&mdsc->mutex); |
3607 | mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); | |
d37b1d99 | 3608 | if (!mdsc->mdsmap) { |
fb3101b6 | 3609 | kfree(mdsc); |
2d06eeb8 | 3610 | return -ENOMEM; |
fb3101b6 | 3611 | } |
2d06eeb8 | 3612 | |
2f2dc053 | 3613 | init_completion(&mdsc->safe_umount_waiters); |
f3c60c59 | 3614 | init_waitqueue_head(&mdsc->session_close_wq); |
2f2dc053 SW |
3615 | INIT_LIST_HEAD(&mdsc->waiting_for_map); |
3616 | mdsc->sessions = NULL; | |
86d8f67b | 3617 | atomic_set(&mdsc->num_sessions, 0); |
2f2dc053 SW |
3618 | mdsc->max_sessions = 0; |
3619 | mdsc->stopping = 0; | |
affbc19a | 3620 | mdsc->last_snap_seq = 0; |
2f2dc053 | 3621 | init_rwsem(&mdsc->snap_rwsem); |
a105f00c | 3622 | mdsc->snap_realms = RB_ROOT; |
2f2dc053 SW |
3623 | INIT_LIST_HEAD(&mdsc->snap_empty); |
3624 | spin_lock_init(&mdsc->snap_empty_lock); | |
3625 | mdsc->last_tid = 0; | |
e8a7b8b1 | 3626 | mdsc->oldest_tid = 0; |
44ca18f2 | 3627 | mdsc->request_tree = RB_ROOT; |
2f2dc053 SW |
3628 | INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); |
3629 | mdsc->last_renew_caps = jiffies; | |
3630 | INIT_LIST_HEAD(&mdsc->cap_delay_list); | |
3631 | spin_lock_init(&mdsc->cap_delay_lock); | |
3632 | INIT_LIST_HEAD(&mdsc->snap_flush_list); | |
3633 | spin_lock_init(&mdsc->snap_flush_lock); | |
553adfd9 | 3634 | mdsc->last_cap_flush_tid = 1; |
e4500b5e | 3635 | INIT_LIST_HEAD(&mdsc->cap_flush_list); |
2f2dc053 | 3636 | INIT_LIST_HEAD(&mdsc->cap_dirty); |
db354052 | 3637 | INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); |
2f2dc053 SW |
3638 | mdsc->num_cap_flushing = 0; |
3639 | spin_lock_init(&mdsc->cap_dirty_lock); | |
3640 | init_waitqueue_head(&mdsc->cap_flushing_wq); | |
3641 | spin_lock_init(&mdsc->dentry_lru_lock); | |
3642 | INIT_LIST_HEAD(&mdsc->dentry_lru); | |
2d06eeb8 | 3643 | |
37151668 | 3644 | ceph_caps_init(mdsc); |
3d14c5d2 | 3645 | ceph_adjust_min_caps(mdsc, fsc->min_caps); |
37151668 | 3646 | |
10183a69 YZ |
3647 | init_rwsem(&mdsc->pool_perm_rwsem); |
3648 | mdsc->pool_perm_tree = RB_ROOT; | |
3649 | ||
717e6f28 YZ |
3650 | strncpy(mdsc->nodename, utsname()->nodename, |
3651 | sizeof(mdsc->nodename) - 1); | |
5f44f142 | 3652 | return 0; |
2f2dc053 SW |
3653 | } |
3654 | ||
3655 | /* | |
3656 | * Wait for safe replies on open mds requests. If we time out, drop | |
3657 | * all requests from the tree to avoid dangling dentry refs. | |
3658 | */ | |
3659 | static void wait_requests(struct ceph_mds_client *mdsc) | |
3660 | { | |
a319bf56 | 3661 | struct ceph_options *opts = mdsc->fsc->client->options; |
2f2dc053 | 3662 | struct ceph_mds_request *req; |
2f2dc053 SW |
3663 | |
3664 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3665 | if (__get_oldest_req(mdsc)) { |
2f2dc053 | 3666 | mutex_unlock(&mdsc->mutex); |
44ca18f2 | 3667 | |
2f2dc053 SW |
3668 | dout("wait_requests waiting for requests\n"); |
3669 | wait_for_completion_timeout(&mdsc->safe_umount_waiters, | |
a319bf56 | 3670 | ceph_timeout_jiffies(opts->mount_timeout)); |
2f2dc053 SW |
3671 | |
3672 | /* tear down remaining requests */ | |
44ca18f2 SW |
3673 | mutex_lock(&mdsc->mutex); |
3674 | while ((req = __get_oldest_req(mdsc))) { | |
2f2dc053 SW |
3675 | dout("wait_requests timed out on tid %llu\n", |
3676 | req->r_tid); | |
44ca18f2 | 3677 | __unregister_request(mdsc, req); |
2f2dc053 SW |
3678 | } |
3679 | } | |
3680 | mutex_unlock(&mdsc->mutex); | |
3681 | dout("wait_requests done\n"); | |
3682 | } | |
3683 | ||
3684 | /* | |
3685 | * called before mount is ro, and before dentries are torn down. | |
3686 | * (hmm, does this still race with new lookups?) | |
3687 | */ | |
3688 | void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) | |
3689 | { | |
3690 | dout("pre_umount\n"); | |
3691 | mdsc->stopping = 1; | |
3692 | ||
3693 | drop_leases(mdsc); | |
afcdaea3 | 3694 | ceph_flush_dirty_caps(mdsc); |
2f2dc053 | 3695 | wait_requests(mdsc); |
17c688c3 SW |
3696 | |
3697 | /* | |
3698 | * wait for reply handlers to drop their request refs and | |
3699 | * their inode/dcache refs | |
3700 | */ | |
3701 | ceph_msgr_flush(); | |
2f2dc053 SW |
3702 | } |
3703 | ||
3704 | /* | |
3705 | * wait for all write mds requests to flush. | |
3706 | */ | |
3707 | static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) | |
3708 | { | |
80fc7314 | 3709 | struct ceph_mds_request *req = NULL, *nextreq; |
44ca18f2 | 3710 | struct rb_node *n; |
2f2dc053 SW |
3711 | |
3712 | mutex_lock(&mdsc->mutex); | |
3713 | dout("wait_unsafe_requests want %lld\n", want_tid); | |
80fc7314 | 3714 | restart: |
44ca18f2 SW |
3715 | req = __get_oldest_req(mdsc); |
3716 | while (req && req->r_tid <= want_tid) { | |
80fc7314 SW |
3717 | /* find next request */ |
3718 | n = rb_next(&req->r_node); | |
3719 | if (n) | |
3720 | nextreq = rb_entry(n, struct ceph_mds_request, r_node); | |
3721 | else | |
3722 | nextreq = NULL; | |
e8a7b8b1 YZ |
3723 | if (req->r_op != CEPH_MDS_OP_SETFILELOCK && |
3724 | (req->r_op & CEPH_MDS_OP_WRITE)) { | |
44ca18f2 SW |
3725 | /* write op */ |
3726 | ceph_mdsc_get_request(req); | |
80fc7314 SW |
3727 | if (nextreq) |
3728 | ceph_mdsc_get_request(nextreq); | |
44ca18f2 SW |
3729 | mutex_unlock(&mdsc->mutex); |
3730 | dout("wait_unsafe_requests wait on %llu (want %llu)\n", | |
3731 | req->r_tid, want_tid); | |
3732 | wait_for_completion(&req->r_safe_completion); | |
3733 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3734 | ceph_mdsc_put_request(req); |
80fc7314 SW |
3735 | if (!nextreq) |
3736 | break; /* next dne before, so we're done! */ | |
3737 | if (RB_EMPTY_NODE(&nextreq->r_node)) { | |
3738 | /* next request was removed from tree */ | |
3739 | ceph_mdsc_put_request(nextreq); | |
3740 | goto restart; | |
3741 | } | |
3742 | ceph_mdsc_put_request(nextreq); /* won't go away */ | |
44ca18f2 | 3743 | } |
80fc7314 | 3744 | req = nextreq; |
2f2dc053 SW |
3745 | } |
3746 | mutex_unlock(&mdsc->mutex); | |
3747 | dout("wait_unsafe_requests done\n"); | |
3748 | } | |
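/*
 * The traversal discipline above, spelled out (a descriptive sketch,
 * not new code): before dropping mdsc->mutex to sleep on a request's
 * safe completion, a reference is taken on the *next* request in the
 * tree; after re-locking, RB_EMPTY_NODE() on the saved node reveals
 * whether it was unregistered while we slept, in which case the walk
 * restarts from the oldest request instead of following a dangling
 * rb_node.
 */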
3749 | ||
3750 | void ceph_mdsc_sync(struct ceph_mds_client *mdsc) | |
3751 | { | |
0e294387 | 3752 | u64 want_tid, want_flush; |
2f2dc053 | 3753 | |
52953d55 | 3754 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
56b7cf95 SW |
3755 | return; |
3756 | ||
2f2dc053 SW |
3757 | dout("sync\n"); |
3758 | mutex_lock(&mdsc->mutex); | |
3759 | want_tid = mdsc->last_tid; | |
2f2dc053 | 3760 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 3761 | |
afcdaea3 | 3762 | ceph_flush_dirty_caps(mdsc); |
d3383a8e | 3763 | spin_lock(&mdsc->cap_dirty_lock); |
8310b089 | 3764 | want_flush = mdsc->last_cap_flush_tid; |
c8799fc4 YZ |
3765 | if (!list_empty(&mdsc->cap_flush_list)) { |
3766 | struct ceph_cap_flush *cf = | |
3767 | list_last_entry(&mdsc->cap_flush_list, | |
3768 | struct ceph_cap_flush, g_list); | |
3769 | cf->wake = true; | |
3770 | } | |
d3383a8e YZ |
3771 | spin_unlock(&mdsc->cap_dirty_lock); |
3772 | ||
0e294387 YZ |
3773 | dout("sync want tid %lld flush_seq %lld\n", |
3774 | want_tid, want_flush); | |
2f2dc053 SW |
3775 | |
3776 | wait_unsafe_requests(mdsc, want_tid); | |
0e294387 | 3777 | wait_caps_flush(mdsc, want_flush); |
2f2dc053 SW |
3778 | } |
3779 | ||
f3c60c59 SW |
3780 | /* |
3781 | * true if all sessions are closed, or we force unmount | |
3782 | */ | |
fcff415c | 3783 | static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) |
f3c60c59 | 3784 | { |
52953d55 | 3785 | if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
f3c60c59 | 3786 | return true; |
fcff415c | 3787 | return atomic_read(&mdsc->num_sessions) <= skipped; |
f3c60c59 | 3788 | } |
2f2dc053 SW |
3789 | |
3790 | /* | |
3791 | * called after sb is ro. | |
3792 | */ | |
3793 | void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) | |
3794 | { | |
a319bf56 | 3795 | struct ceph_options *opts = mdsc->fsc->client->options; |
2f2dc053 SW |
3796 | struct ceph_mds_session *session; |
3797 | int i; | |
fcff415c | 3798 | int skipped = 0; |
2f2dc053 SW |
3799 | |
3800 | dout("close_sessions\n"); | |
3801 | ||
2f2dc053 | 3802 | /* close sessions */ |
f3c60c59 SW |
3803 | mutex_lock(&mdsc->mutex); |
3804 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3805 | session = __ceph_lookup_mds_session(mdsc, i); | |
3806 | if (!session) | |
3807 | continue; | |
2f2dc053 | 3808 | mutex_unlock(&mdsc->mutex); |
f3c60c59 | 3809 | mutex_lock(&session->s_mutex); |
fcff415c YZ |
3810 | if (__close_session(mdsc, session) <= 0) |
3811 | skipped++; | |
f3c60c59 SW |
3812 | mutex_unlock(&session->s_mutex); |
3813 | ceph_put_mds_session(session); | |
2f2dc053 SW |
3814 | mutex_lock(&mdsc->mutex); |
3815 | } | |
f3c60c59 SW |
3816 | mutex_unlock(&mdsc->mutex); |
3817 | ||
3818 | dout("waiting for sessions to close\n"); | |
fcff415c YZ |
3819 | wait_event_timeout(mdsc->session_close_wq, |
3820 | done_closing_sessions(mdsc, skipped), | |
a319bf56 | 3821 | ceph_timeout_jiffies(opts->mount_timeout)); |
2f2dc053 SW |
3822 | |
3823 | /* tear down remaining sessions */ | |
f3c60c59 | 3824 | mutex_lock(&mdsc->mutex); |
2f2dc053 SW |
3825 | for (i = 0; i < mdsc->max_sessions; i++) { |
3826 | if (mdsc->sessions[i]) { | |
3827 | session = get_session(mdsc->sessions[i]); | |
2600d2dd | 3828 | __unregister_session(mdsc, session); |
2f2dc053 SW |
3829 | mutex_unlock(&mdsc->mutex); |
3830 | mutex_lock(&session->s_mutex); | |
3831 | remove_session_caps(session); | |
3832 | mutex_unlock(&session->s_mutex); | |
3833 | ceph_put_mds_session(session); | |
3834 | mutex_lock(&mdsc->mutex); | |
3835 | } | |
3836 | } | |
2f2dc053 | 3837 | WARN_ON(!list_empty(&mdsc->cap_delay_list)); |
2f2dc053 SW |
3838 | mutex_unlock(&mdsc->mutex); |
3839 | ||
3840 | ceph_cleanup_empty_realms(mdsc); | |
3841 | ||
3842 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3843 | ||
3844 | dout("stopped\n"); | |
3845 | } | |
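/*
 * Accounting note, as understood from the code above: sessions for
 * which __close_session() could not issue a new close request are
 * counted as "skipped", and done_closing_sessions() considers the
 * wait finished once only those remain, so a session that cannot be
 * closed does not stall the unmount beyond the mount timeout.
 */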
3846 | ||
48fec5d0 YZ |
3847 | void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) |
3848 | { | |
3849 | struct ceph_mds_session *session; | |
3850 | int mds; | |
3851 | ||
3852 | dout("force umount\n"); | |
3853 | ||
3854 | mutex_lock(&mdsc->mutex); | |
3855 | for (mds = 0; mds < mdsc->max_sessions; mds++) { | |
3856 | session = __ceph_lookup_mds_session(mdsc, mds); | |
3857 | if (!session) | |
3858 | continue; | |
3859 | mutex_unlock(&mdsc->mutex); | |
3860 | mutex_lock(&session->s_mutex); | |
3861 | __close_session(mdsc, session); | |
3862 | if (session->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3863 | cleanup_session_requests(mdsc, session); | |
3864 | remove_session_caps(session); | |
3865 | } | |
3866 | mutex_unlock(&session->s_mutex); | |
3867 | ceph_put_mds_session(session); | |
3868 | mutex_lock(&mdsc->mutex); | |
3869 | kick_requests(mdsc, mds); | |
3870 | } | |
3871 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
3872 | mutex_unlock(&mdsc->mutex); | |
3873 | } | |
3874 | ||
3d14c5d2 | 3875 | static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
2f2dc053 SW |
3876 | { |
3877 | dout("stop\n"); | |
3878 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3879 | if (mdsc->mdsmap) | |
3880 | ceph_mdsmap_destroy(mdsc->mdsmap); | |
3881 | kfree(mdsc->sessions); | |
37151668 | 3882 | ceph_caps_finalize(mdsc); |
10183a69 | 3883 | ceph_pool_perm_destroy(mdsc); |
2f2dc053 SW |
3884 | } |
3885 | ||
3d14c5d2 YS |
3886 | void ceph_mdsc_destroy(struct ceph_fs_client *fsc) |
3887 | { | |
3888 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
ef550f6f | 3889 | dout("mdsc_destroy %p\n", mdsc); |
ef550f6f SW |
3890 | |
3891 | /* flush out any connection work with references to us */ | |
3892 | ceph_msgr_flush(); | |
3893 | ||
62a65f36 YZ |
3894 | ceph_mdsc_stop(mdsc); |
3895 | ||
3d14c5d2 YS |
3896 | fsc->mdsc = NULL; |
3897 | kfree(mdsc); | |
ef550f6f | 3898 | dout("mdsc_destroy %p done\n", mdsc); |
3d14c5d2 YS |
3899 | } |
3900 | ||
430afbad YZ |
3901 | void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
3902 | { | |
3903 | struct ceph_fs_client *fsc = mdsc->fsc; | |
3904 | const char *mds_namespace = fsc->mount_options->mds_namespace; | |
3905 | void *p = msg->front.iov_base; | |
3906 | void *end = p + msg->front.iov_len; | |
3907 | u32 epoch; | |
3908 | u32 map_len; | |
3909 | u32 num_fs; | |
3910 | u32 mount_fscid = (u32)-1; | |
3911 | u8 struct_v, struct_cv; | |
3912 | int err = -EINVAL; | |
3913 | ||
3914 | ceph_decode_need(&p, end, sizeof(u32), bad); | |
3915 | epoch = ceph_decode_32(&p); | |
3916 | ||
3917 | dout("handle_fsmap epoch %u\n", epoch); | |
3918 | ||
3919 | ceph_decode_need(&p, end, 2 + sizeof(u32), bad); | |
3920 | struct_v = ceph_decode_8(&p); | |
3921 | struct_cv = ceph_decode_8(&p); | |
3922 | map_len = ceph_decode_32(&p); | |
3923 | ||
3924 | ceph_decode_need(&p, end, sizeof(u32) * 3, bad); | |
3925 | p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */ | |
3926 | ||
3927 | num_fs = ceph_decode_32(&p); | |
3928 | while (num_fs-- > 0) { | |
3929 | void *info_p, *info_end; | |
3930 | u32 info_len; | |
3931 | u8 info_v, info_cv; | |
3932 | u32 fscid, namelen; | |
3933 | ||
3934 | ceph_decode_need(&p, end, 2 + sizeof(u32), bad); | |
3935 | info_v = ceph_decode_8(&p); | |
3936 | info_cv = ceph_decode_8(&p); | |
3937 | info_len = ceph_decode_32(&p); | |
3938 | ceph_decode_need(&p, end, info_len, bad); | |
3939 | info_p = p; | |
3940 | info_end = p + info_len; | |
3941 | p = info_end; | |
3942 | ||
3943 | ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad); | |
3944 | fscid = ceph_decode_32(&info_p); | |
3945 | namelen = ceph_decode_32(&info_p); | |
3946 | ceph_decode_need(&info_p, info_end, namelen, bad); | |
3947 | ||
3948 | if (mds_namespace && | |
3949 | strlen(mds_namespace) == namelen && | |
3950 | !strncmp(mds_namespace, (char *)info_p, namelen)) { | |
3951 | mount_fscid = fscid; | |
3952 | break; | |
3953 | } | |
3954 | } | |
3955 | ||
3956 | ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch); | |
3957 | if (mount_fscid != (u32)-1) { | |
3958 | fsc->client->monc.fs_cluster_id = mount_fscid; | |
3959 | ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, | |
3960 | 0, true); | |
3961 | ceph_monc_renew_subs(&fsc->client->monc); | |
3962 | } else { | |
3963 | err = -ENOENT; | |
3964 | goto err_out; | |
3965 | } | |
3966 | return; | |
76bd6ec4 | 3967 | |
430afbad YZ |
3968 | bad: |
3969 | pr_err("error decoding fsmap\n"); | |
3970 | err_out: | |
3971 | mutex_lock(&mdsc->mutex); | |
76bd6ec4 | 3972 | mdsc->mdsmap_err = err; |
430afbad YZ |
3973 | __wake_requests(mdsc, &mdsc->waiting_for_map); |
3974 | mutex_unlock(&mdsc->mutex); | |
430afbad | 3975 | } |
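/*
 * Byte layout of the FS_MAP_USER front decoded above, as a sketch:
 *
 *	u32 epoch
 *	u8  struct_v, u8 struct_cv, u32 map_len
 *	u32 epoch (repeated), u32 legacy_client_fscid	(skipped)
 *	u32 num_fs, then per filesystem:
 *		u8 info_v, u8 info_cv, u32 info_len
 *		u32 fscid, u32 namelen, namelen name bytes, ...
 *
 * Only fscid and the name are consumed here, to match mds_namespace.
 */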
2f2dc053 SW |
3976 | |
3977 | /* | |
3978 | * handle mds map update. | |
3979 | */ | |
430afbad | 3980 | void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
2f2dc053 SW |
3981 | { |
3982 | u32 epoch; | |
3983 | u32 maplen; | |
3984 | void *p = msg->front.iov_base; | |
3985 | void *end = p + msg->front.iov_len; | |
3986 | struct ceph_mdsmap *newmap, *oldmap; | |
3987 | struct ceph_fsid fsid; | |
3988 | int err = -EINVAL; | |
3989 | ||
3990 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); | |
3991 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
3d14c5d2 | 3992 | if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) |
0743304d | 3993 | return; |
c89136ea SW |
3994 | epoch = ceph_decode_32(&p); |
3995 | maplen = ceph_decode_32(&p); | |
2f2dc053 SW |
3996 | dout("handle_map epoch %u len %d\n", epoch, (int)maplen); |
3997 | ||
3998 | /* do we need it? */ | |
2f2dc053 SW |
3999 | mutex_lock(&mdsc->mutex); |
4000 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { | |
4001 | dout("handle_map epoch %u <= our %u\n", | |
4002 | epoch, mdsc->mdsmap->m_epoch); | |
4003 | mutex_unlock(&mdsc->mutex); | |
4004 | return; | |
4005 | } | |
4006 | ||
4007 | newmap = ceph_mdsmap_decode(&p, end); | |
4008 | if (IS_ERR(newmap)) { | |
4009 | err = PTR_ERR(newmap); | |
4010 | goto bad_unlock; | |
4011 | } | |
4012 | ||
4013 | /* swap into place */ | |
4014 | if (mdsc->mdsmap) { | |
4015 | oldmap = mdsc->mdsmap; | |
4016 | mdsc->mdsmap = newmap; | |
4017 | check_new_map(mdsc, newmap, oldmap); | |
4018 | ceph_mdsmap_destroy(oldmap); | |
4019 | } else { | |
4020 | mdsc->mdsmap = newmap; /* first mds map */ | |
4021 | } | |
3d14c5d2 | 4022 | mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; |
2f2dc053 SW |
4023 | |
4024 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
82dcabad ID |
4025 | ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, |
4026 | mdsc->mdsmap->m_epoch); | |
2f2dc053 SW |
4027 | |
4028 | mutex_unlock(&mdsc->mutex); | |
4029 | schedule_delayed(mdsc); | |
4030 | return; | |
4031 | ||
4032 | bad_unlock: | |
4033 | mutex_unlock(&mdsc->mutex); | |
4034 | bad: | |
4035 | pr_err("error decoding mdsmap %d\n", err); | |
4036 | return; | |
4037 | } | |
4038 | ||
4039 | static struct ceph_connection *con_get(struct ceph_connection *con) | |
4040 | { | |
4041 | struct ceph_mds_session *s = con->private; | |
4042 | ||
4043 | if (get_session(s)) { | |
3997c01d | 4044 | dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref)); |
2f2dc053 SW |
4045 | return con; |
4046 | } | |
4047 | dout("mdsc con_get %p FAIL\n", s); | |
4048 | return NULL; | |
4049 | } | |
4050 | ||
4051 | static void con_put(struct ceph_connection *con) | |
4052 | { | |
4053 | struct ceph_mds_session *s = con->private; | |
4054 | ||
3997c01d | 4055 | dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1); |
2f2dc053 SW |
4056 | ceph_put_mds_session(s); |
4057 | } | |
4058 | ||
4059 | /* | |
4060 | * if the client is unresponsive for long enough, the mds will kill | |
4061 | * the session entirely. | |
4062 | */ | |
4063 | static void peer_reset(struct ceph_connection *con) | |
4064 | { | |
4065 | struct ceph_mds_session *s = con->private; | |
7e70f0ed | 4066 | struct ceph_mds_client *mdsc = s->s_mdsc; |
2f2dc053 | 4067 | |
f3ae1b97 | 4068 | pr_warn("mds%d closed our session\n", s->s_mds); |
7e70f0ed | 4069 | send_mds_reconnect(mdsc, s); |
2f2dc053 SW |
4070 | } |
4071 | ||
4072 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
4073 | { | |
4074 | struct ceph_mds_session *s = con->private; | |
4075 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
4076 | int type = le16_to_cpu(msg->hdr.type); | |
4077 | ||
2600d2dd SW |
4078 | mutex_lock(&mdsc->mutex); |
4079 | if (__verify_registered_session(mdsc, s) < 0) { | |
4080 | mutex_unlock(&mdsc->mutex); | |
4081 | goto out; | |
4082 | } | |
4083 | mutex_unlock(&mdsc->mutex); | |
4084 | ||
2f2dc053 SW |
4085 | switch (type) { |
4086 | case CEPH_MSG_MDS_MAP: | |
430afbad YZ |
4087 | ceph_mdsc_handle_mdsmap(mdsc, msg); |
4088 | break; | |
4089 | case CEPH_MSG_FS_MAP_USER: | |
4090 | ceph_mdsc_handle_fsmap(mdsc, msg); | |
2f2dc053 SW |
4091 | break; |
4092 | case CEPH_MSG_CLIENT_SESSION: | |
4093 | handle_session(s, msg); | |
4094 | break; | |
4095 | case CEPH_MSG_CLIENT_REPLY: | |
4096 | handle_reply(s, msg); | |
4097 | break; | |
4098 | case CEPH_MSG_CLIENT_REQUEST_FORWARD: | |
2600d2dd | 4099 | handle_forward(mdsc, s, msg); |
2f2dc053 SW |
4100 | break; |
4101 | case CEPH_MSG_CLIENT_CAPS: | |
4102 | ceph_handle_caps(s, msg); | |
4103 | break; | |
4104 | case CEPH_MSG_CLIENT_SNAP: | |
2600d2dd | 4105 | ceph_handle_snap(mdsc, s, msg); |
2f2dc053 SW |
4106 | break; |
4107 | case CEPH_MSG_CLIENT_LEASE: | |
2600d2dd | 4108 | handle_lease(mdsc, s, msg); |
2f2dc053 SW |
4109 | break; |
4110 | ||
4111 | default: | |
4112 | pr_err("received unknown message type %d %s\n", type, | |
4113 | ceph_msg_type_name(type)); | |
4114 | } | |
2600d2dd | 4115 | out: |
2f2dc053 SW |
4116 | ceph_msg_put(msg); |
4117 | } | |
4118 | ||
4e7a5dcd SW |
4119 | /* |
4120 | * authentication | |
4121 | */ | |
a3530df3 AE |
4122 | |
4123 | /* | |
4124 | * Note: returned pointer is the address of a structure that's | |
4125 | * managed separately. Caller must *not* attempt to free it. | |
4126 | */ | |
4127 | static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |
8f43fb53 | 4128 | int *proto, int force_new) |
4e7a5dcd SW |
4129 | { |
4130 | struct ceph_mds_session *s = con->private; | |
4131 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 4132 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
74f1869f | 4133 | struct ceph_auth_handshake *auth = &s->s_auth; |
4e7a5dcd | 4134 | |
74f1869f | 4135 | if (force_new && auth->authorizer) { |
6c1ea260 | 4136 | ceph_auth_destroy_authorizer(auth->authorizer); |
74f1869f | 4137 | auth->authorizer = NULL; |
4e7a5dcd | 4138 | } |
27859f97 SW |
4139 | if (!auth->authorizer) { |
4140 | int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
4141 | auth); | |
0bed9b5c SW |
4142 | if (ret) |
4143 | return ERR_PTR(ret); | |
27859f97 SW |
4144 | } else { |
4145 | int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
4146 | auth); | |
a255651d | 4147 | if (ret) |
a3530df3 | 4148 | return ERR_PTR(ret); |
4e7a5dcd | 4149 | } |
4e7a5dcd | 4150 | *proto = ac->protocol; |
74f1869f | 4151 | |
a3530df3 | 4152 | return auth; |
4e7a5dcd SW |
4153 | } |
4154 | ||
4155 | ||
0dde5848 | 4156 | static int verify_authorizer_reply(struct ceph_connection *con) |
4e7a5dcd SW |
4157 | { |
4158 | struct ceph_mds_session *s = con->private; | |
4159 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 4160 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
4e7a5dcd | 4161 | |
0dde5848 | 4162 | return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer); |
4e7a5dcd SW |
4163 | } |
4164 | ||
9bd2e6f8 SW |
4165 | static int invalidate_authorizer(struct ceph_connection *con) |
4166 | { | |
4167 | struct ceph_mds_session *s = con->private; | |
4168 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 4169 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
9bd2e6f8 | 4170 | |
27859f97 | 4171 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
9bd2e6f8 | 4172 | |
3d14c5d2 | 4173 | return ceph_monc_validate_auth(&mdsc->fsc->client->monc); |
9bd2e6f8 SW |
4174 | } |
4175 | ||
53ded495 AE |
4176 | static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, |
4177 | struct ceph_msg_header *hdr, int *skip) | |
4178 | { | |
4179 | struct ceph_msg *msg; | |
4180 | int type = (int) le16_to_cpu(hdr->type); | |
4181 | int front_len = (int) le32_to_cpu(hdr->front_len); | |
4182 | ||
4183 | if (con->in_msg) | |
4184 | return con->in_msg; | |
4185 | ||
4186 | *skip = 0; | |
4187 | msg = ceph_msg_new(type, front_len, GFP_NOFS, false); | |
4188 | if (!msg) { | |
4189 | pr_err("unable to allocate msg type %d len %d\n", | |
4190 | type, front_len); | |
4191 | return NULL; | |
4192 | } | |
53ded495 AE |
4193 | |
4194 | return msg; | |
4195 | } | |
4196 | ||
79dbd1ba | 4197 | static int mds_sign_message(struct ceph_msg *msg) |
33d07337 | 4198 | { |
79dbd1ba | 4199 | struct ceph_mds_session *s = msg->con->private; |
33d07337 | 4200 | struct ceph_auth_handshake *auth = &s->s_auth; |
79dbd1ba | 4201 | |
33d07337 YZ |
4202 | return ceph_auth_sign_message(auth, msg); |
4203 | } | |
4204 | ||
79dbd1ba | 4205 | static int mds_check_message_signature(struct ceph_msg *msg) |
33d07337 | 4206 | { |
79dbd1ba | 4207 | struct ceph_mds_session *s = msg->con->private; |
33d07337 | 4208 | struct ceph_auth_handshake *auth = &s->s_auth; |
79dbd1ba | 4209 | |
33d07337 YZ |
4210 | return ceph_auth_check_message_signature(auth, msg); |
4211 | } | |
4212 | ||
9e32789f | 4213 | static const struct ceph_connection_operations mds_con_ops = { |
2f2dc053 SW |
4214 | .get = con_get, |
4215 | .put = con_put, | |
4216 | .dispatch = dispatch, | |
4e7a5dcd SW |
4217 | .get_authorizer = get_authorizer, |
4218 | .verify_authorizer_reply = verify_authorizer_reply, | |
9bd2e6f8 | 4219 | .invalidate_authorizer = invalidate_authorizer, |
2f2dc053 | 4220 | .peer_reset = peer_reset, |
53ded495 | 4221 | .alloc_msg = mds_alloc_msg, |
79dbd1ba ID |
4222 | .sign_message = mds_sign_message, |
4223 | .check_message_signature = mds_check_message_signature, | |
2f2dc053 SW |
4224 | }; |
4225 | ||
2f2dc053 | 4226 | /* eof */ |