#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	bool flock;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		ceph_decode_need(p, end, info->pool_ns_len, bad);
		*p += info->pool_ns_len;
	} else {
		info->pool_ns_len = 0;
	}

	return 0;
bad:
	return err;
}
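
/*
 * A note on the decode helpers used above and throughout this file:
 * ceph_decode_need() jumps to the supplied label if fewer than the
 * requested bytes remain before 'end', and the ceph_decode_*_safe()
 * variants combine that bounds check with the little-endian read,
 * roughly:
 *
 *	if (unlikely(*p + sizeof(u32) > end))
 *		goto bad;
 *	val = le32_to_cpu(*(__le32 *)*p);
 *	*p += sizeof(u32);
 *
 * (Illustrative sketch only; the real macros live in
 * <linux/ceph/decode.h>.)
 */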

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_in);
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);
	if ((unsigned long)(info->dir_dlease + num) >
	    (unsigned long)info->dir_in + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
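
/*
 * Layout note: the preallocated readdir buffer (info->dir_in, of
 * dir_buf_size bytes) is carved into four parallel arrays indexed by
 * entry number -- inode info, dname pointers, dname lengths, and
 * dentry lease pointers -- which is why each array base above is
 * derived by simply stepping 'num' elements past the previous one.
 */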

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_READDIR ||
		 info->head->op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
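
/*
 * In effect, the reply payload decoded above is three length-prefixed
 * sections following the fixed reply head:
 *
 *	struct ceph_mds_reply_head head;
 *	u32 trace_len;  u8 trace[trace_len];	(dentry/inode trace)
 *	u32 extra_len;  u8 extra[extra_len];	(op-specific results)
 *	u32 snap_len;   u8 snapblob[snap_len];	(snap context blob)
 *
 * (Illustrative layout inferred from the decode sequence above; the
 * authoritative encoding is defined by the Ceph wire protocol.)
 */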

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_in)
		return;
	free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(
				s->s_mdsc->fsc->client->monc.auth,
				s->s_auth.authorizer);
		kfree(s);
	}
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
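
/*
 * Note that the sessions[] array above grows to the next power of two
 * that can hold the new rank (1 << get_count_order(mds+1)), so
 * repeated registrations reallocate O(log n) times rather than once
 * per rank; slots for unregistered ranks simply remain NULL.
 */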

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mds_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else {
			ceph_mdsc_get_request(req);
			return req;
		}
	}
	return NULL;
}

static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
{
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mds_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
}
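
/*
 * In-flight requests live in mdsc->request_tree, an rbtree keyed by
 * the monotonically increasing tid assigned at registration.  That
 * ordering is what lets __unregister_request() below recompute
 * mdsc->oldest_tid with a simple rb_next() walk.
 */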

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}
2f2dc053 SW |
715 | static int __choose_mds(struct ceph_mds_client *mdsc, |
716 | struct ceph_mds_request *req) | |
717 | { | |
718 | struct inode *inode; | |
719 | struct ceph_inode_info *ci; | |
720 | struct ceph_cap *cap; | |
721 | int mode = req->r_direct_mode; | |
722 | int mds = -1; | |
723 | u32 hash = req->r_direct_hash; | |
724 | bool is_hash = req->r_direct_is_hash; | |
725 | ||
726 | /* | |
727 | * is there a specific mds we should try? ignore hint if we have | |
728 | * no session and the mds is not up (active or recovering). | |
729 | */ | |
730 | if (req->r_resend_mds >= 0 && | |
731 | (__have_session(mdsc, req->r_resend_mds) || | |
732 | ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { | |
733 | dout("choose_mds using resend_mds mds%d\n", | |
734 | req->r_resend_mds); | |
735 | return req->r_resend_mds; | |
736 | } | |
737 | ||
738 | if (mode == USE_RANDOM_MDS) | |
739 | goto random; | |
740 | ||
741 | inode = NULL; | |
742 | if (req->r_inode) { | |
743 | inode = req->r_inode; | |
744 | } else if (req->r_dentry) { | |
d79698da SW |
745 | /* ignore race with rename; old or new d_parent is okay */ |
746 | struct dentry *parent = req->r_dentry->d_parent; | |
2b0143b5 | 747 | struct inode *dir = d_inode(parent); |
eb6bb1c5 | 748 | |
3d14c5d2 | 749 | if (dir->i_sb != mdsc->fsc->sb) { |
eb6bb1c5 | 750 | /* not this fs! */ |
2b0143b5 | 751 | inode = d_inode(req->r_dentry); |
eb6bb1c5 SW |
752 | } else if (ceph_snap(dir) != CEPH_NOSNAP) { |
753 | /* direct snapped/virtual snapdir requests | |
754 | * based on parent dir inode */ | |
d79698da | 755 | struct dentry *dn = get_nonsnap_parent(parent); |
2b0143b5 | 756 | inode = d_inode(dn); |
eb6bb1c5 | 757 | dout("__choose_mds using nonsnap parent %p\n", inode); |
ca18bede | 758 | } else { |
eb6bb1c5 | 759 | /* dentry target */ |
2b0143b5 | 760 | inode = d_inode(req->r_dentry); |
ca18bede YZ |
761 | if (!inode || mode == USE_AUTH_MDS) { |
762 | /* dir + name */ | |
763 | inode = dir; | |
764 | hash = ceph_dentry_hash(dir, req->r_dentry); | |
765 | is_hash = true; | |
766 | } | |
2f2dc053 SW |
767 | } |
768 | } | |
eb6bb1c5 | 769 | |
2f2dc053 SW |
770 | dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, |
771 | (int)hash, mode); | |
772 | if (!inode) | |
773 | goto random; | |
774 | ci = ceph_inode(inode); | |
775 | ||
776 | if (is_hash && S_ISDIR(inode->i_mode)) { | |
777 | struct ceph_inode_frag frag; | |
778 | int found; | |
779 | ||
780 | ceph_choose_frag(ci, hash, &frag, &found); | |
781 | if (found) { | |
782 | if (mode == USE_ANY_MDS && frag.ndist > 0) { | |
783 | u8 r; | |
784 | ||
785 | /* choose a random replica */ | |
786 | get_random_bytes(&r, 1); | |
787 | r %= frag.ndist; | |
788 | mds = frag.dist[r]; | |
789 | dout("choose_mds %p %llx.%llx " | |
790 | "frag %u mds%d (%d/%d)\n", | |
791 | inode, ceph_vinop(inode), | |
d66bbd44 | 792 | frag.frag, mds, |
2f2dc053 | 793 | (int)r, frag.ndist); |
d66bbd44 SW |
794 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
795 | CEPH_MDS_STATE_ACTIVE) | |
796 | return mds; | |
2f2dc053 SW |
797 | } |
798 | ||
799 | /* since this file/dir wasn't known to be | |
800 | * replicated, then we want to look for the | |
801 | * authoritative mds. */ | |
802 | mode = USE_AUTH_MDS; | |
803 | if (frag.mds >= 0) { | |
804 | /* choose auth mds */ | |
805 | mds = frag.mds; | |
806 | dout("choose_mds %p %llx.%llx " | |
807 | "frag %u mds%d (auth)\n", | |
808 | inode, ceph_vinop(inode), frag.frag, mds); | |
d66bbd44 SW |
809 | if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= |
810 | CEPH_MDS_STATE_ACTIVE) | |
811 | return mds; | |
2f2dc053 SW |
812 | } |
813 | } | |
814 | } | |
815 | ||
be655596 | 816 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
817 | cap = NULL; |
818 | if (mode == USE_AUTH_MDS) | |
819 | cap = ci->i_auth_cap; | |
820 | if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) | |
821 | cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); | |
822 | if (!cap) { | |
be655596 | 823 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
824 | goto random; |
825 | } | |
826 | mds = cap->session->s_mds; | |
827 | dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", | |
828 | inode, ceph_vinop(inode), mds, | |
829 | cap == ci->i_auth_cap ? "auth " : "", cap); | |
be655596 | 830 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 SW |
831 | return mds; |
832 | ||
833 | random: | |
834 | mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); | |
835 | dout("choose_mds chose random mds%d\n", mds); | |
836 | return mds; | |
837 | } | |
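
/*
 * To summarize, __choose_mds() works down a fixed preference order:
 * an explicit resend/forward hint, then (for hashed directory
 * fragments) a random replica or the authoritative mds from the frag
 * tree, then the mds holding an existing cap on the relevant inode,
 * and finally a random active mds.
 */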


/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", utsname()->nodename},
		{"kernel_version", utsname()->release},
		{"entity_id", opt->name ? opt->name : ""},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}
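
/*
 * The metadata serialized above follows the userspace encoding of
 * map<string, string>: a u32 entry count, then for each entry a
 * u32-length-prefixed key and a u32-length-prefixed value (hence the
 * 8 bytes + key + value accounted per entry), i.e. roughly:
 *
 *	u32 count;
 *	struct { u32 key_len; char key[];
 *		 u32 val_len; char val[]; } entries[count];
 *
 * (Illustrative layout derived from the encode loop above.)
 */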

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
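
/*
 * A convention worth noting: double-underscore helpers in this file
 * (__open_session(), __unregister_request(), ...) assume the caller
 * already holds mdsc->mutex (or the session mutex, as documented per
 * function), while the unprefixed wrappers take and drop the lock
 * themselves, as the pair above illustrates.
 */
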
/*
 * session caps
 */

/* caller holds s_cap_lock, we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
				       struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		list_del_init(&req->r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap; /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
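
/*
 * Note the deferral pattern above: iput() and ceph_put_cap() may
 * sleep or re-enter cap code, so the iterator stashes last_inode and
 * old_cap while s_cap_lock is held and releases them only after the
 * lock is dropped on the next pass.  s_cap_iterator is what allows a
 * racing __ceph_remove_cap() to leave the list node in place for us
 * to finish tearing down here.
 */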

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	int drop = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		while (true) {
			struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
			if (!n)
				break;
			cf = rb_entry(n, struct ceph_cap_flush, i_node);
			rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
			list_add(&cf->list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, list)
			rb_erase(&cf->g_node, &mdsc->cap_flush_tree);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, list);
		list_del(&cf->list);
		ceph_free_cap_flush(cf);
	}
	while (drop--)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct super_block *sb = session->s_mdsc->fsc->sb;
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted; we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job, but
		 * it is not exported, so use the lookup-inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	/* drop cap expires and unlock s_cap_lock */
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);
	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}
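
/*
 * The trimming rule above boils down to: a cap may be dropped only if
 * every bit this mds granted us ('mine') that is currently used or
 * wanted is also issued by another mds ('oissued'); the auth cap
 * additionally stays put while dirty/flushing state or cap snaps
 * exist, or while any write caps are used or wanted.
 */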

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}

static int check_capsnap_flush(struct ceph_inode_info *ci,
			       u64 want_snap_seq)
{
	int ret = 1;
	spin_lock(&ci->i_ceph_lock);
	if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		ret = capsnap->follows >= want_snap_seq;
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	struct rb_node *n;
	struct ceph_cap_flush *cf;
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	n = rb_first(&mdsc->cap_flush_tree);
	cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
	if (cf && cf->tid <= want_flush_tid) {
		dout("check_caps_flush still flushing tid %llu <= %llu\n",
		     cf->tid, want_flush_tid);
		ret = 0;
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}
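
/*
 * Together these two predicates implement flush-completion tracking:
 * ordinary cap flushes carry globally ordered tids in
 * mdsc->cap_flush_tree, so everything through want_flush_tid is done
 * once the smallest tid in the tree exceeds it, while cap snaps are
 * checked per inode against the wanted snap sequence via the oldest
 * entry on i_cap_snaps.
 */
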
/*
 * flush all dirty inode data to disk.
 *
 * waits until we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid, u64 want_snap_seq)
{
	int mds;

	dout("check_caps_flush want %llu snap want %llu\n",
	     want_flush_tid, want_snap_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ) {
		struct ceph_mds_session *session = mdsc->sessions[mds];
		struct inode *inode = NULL;

		if (!session) {
			mds++;
			continue;
		}
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_snaps_flushing)) {
			struct ceph_cap_snap *capsnap =
				list_first_entry(&session->s_cap_snaps_flushing,
						 struct ceph_cap_snap,
						 flushing_item);
			struct ceph_inode_info *ci = capsnap->ci;
			if (!check_capsnap_flush(ci, want_snap_seq)) {
				dout("check_cap_flush still flushing snap %p "
				     "follows %lld <= %lld to mds%d\n",
				     &ci->vfs_inode, capsnap->follows,
				     want_snap_seq, mds);
				inode = igrab(&ci->vfs_inode);
			}
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (inode) {
			wait_event(mdsc->cap_flushing_wq,
				   check_capsnap_flush(ceph_inode(inode),
						       want_snap_seq));
			iput(inode);
		} else {
			mds++;
		}

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
1589 | ||
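wait_event() above blocks until its predicate (check_capsnap_flush() / check_caps_flush()) becomes true, re-testing the predicate on every wakeup. Here is a hedged userspace analogue of that idiom using POSIX condition variables; all names below are invented for illustration.

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static uint64_t flushed_tid;        /* advanced by the flusher thread */

static void wait_caps_flushed(uint64_t want_tid)
{
    pthread_mutex_lock(&lock);
    while (flushed_tid < want_tid)  /* predicate, re-checked on wakeup */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

static void note_flushed(uint64_t tid)  /* called as flushes complete */
{
    pthread_mutex_lock(&lock);
    if (tid > flushed_tid)
        flushed_tid = tid;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    note_flushed(42);
    wait_caps_flushed(40);          /* returns immediately: 42 >= 40 */
    return 0;
}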
1590 | /* | |
1591 | * called under s_mutex | |
1592 | */ | |
3d7ded4d SW |
1593 | void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1594 | struct ceph_mds_session *session) | |
2f2dc053 | 1595 | { |
745a8e3b YZ |
1596 | struct ceph_msg *msg = NULL; |
1597 | struct ceph_mds_cap_release *head; | |
1598 | struct ceph_mds_cap_item *item; | |
1599 | struct ceph_cap *cap; | |
1600 | LIST_HEAD(tmp_list); | |
1601 | int num_cap_releases; | |
2f2dc053 | 1602 | |
0f8605f2 | 1603 | spin_lock(&session->s_cap_lock); |
745a8e3b YZ |
1604 | again: |
1605 | list_splice_init(&session->s_cap_releases, &tmp_list); | |
1606 | num_cap_releases = session->s_num_cap_releases; | |
1607 | session->s_num_cap_releases = 0; | |
2f2dc053 | 1608 | spin_unlock(&session->s_cap_lock); |
e01a5946 | 1609 | |
745a8e3b YZ |
1610 | while (!list_empty(&tmp_list)) { |
1611 | if (!msg) { | |
1612 | msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, | |
1613 | PAGE_CACHE_SIZE, GFP_NOFS, false); | |
1614 | if (!msg) | |
1615 | goto out_err; | |
1616 | head = msg->front.iov_base; | |
1617 | head->num = cpu_to_le32(0); | |
1618 | msg->front.iov_len = sizeof(*head); | |
1619 | } | |
1620 | cap = list_first_entry(&tmp_list, struct ceph_cap, | |
1621 | session_caps); | |
1622 | list_del(&cap->session_caps); | |
1623 | num_cap_releases--; | |
e01a5946 | 1624 | |
00bd8edb | 1625 | head = msg->front.iov_base; |
745a8e3b YZ |
1626 | le32_add_cpu(&head->num, 1); |
1627 | item = msg->front.iov_base + msg->front.iov_len; | |
1628 | item->ino = cpu_to_le64(cap->cap_ino); | |
1629 | item->cap_id = cpu_to_le64(cap->cap_id); | |
1630 | item->migrate_seq = cpu_to_le32(cap->mseq); | |
1631 | item->seq = cpu_to_le32(cap->issue_seq); | |
1632 | msg->front.iov_len += sizeof(*item); | |
1633 | ||
1634 | ceph_put_cap(mdsc, cap); | |
1635 | ||
1636 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | |
1637 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1638 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1639 | ceph_con_send(&session->s_con, msg); | |
1640 | msg = NULL; | |
1641 | } | |
00bd8edb | 1642 | } |
e01a5946 | 1643 | |
745a8e3b | 1644 | BUG_ON(num_cap_releases != 0); |
e01a5946 | 1645 | |
745a8e3b YZ |
1646 | spin_lock(&session->s_cap_lock); |
1647 | if (!list_empty(&session->s_cap_releases)) | |
1648 | goto again; | |
1649 | spin_unlock(&session->s_cap_lock); | |
1650 | ||
1651 | if (msg) { | |
1652 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
1653 | dout("send_cap_releases mds%d %p\n", session->s_mds, msg); | |
1654 | ceph_con_send(&session->s_con, msg); | |
e01a5946 | 1655 | } |
745a8e3b YZ |
1656 | return; |
1657 | out_err: | |
1658 | pr_err("send_cap_releases mds%d, failed to allocate message\n", | |
1659 | session->s_mds); | |
1660 | spin_lock(&session->s_cap_lock); | |
1661 | list_splice(&tmp_list, &session->s_cap_releases); | |
1662 | session->s_num_cap_releases += num_cap_releases; | |
1663 | spin_unlock(&session->s_cap_lock); | |
e01a5946 SW |
1664 | } |
1665 | ||
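The loop above packs fixed-size release items into a message, sends it whenever CEPH_CAPS_PER_RELEASE entries accumulate, then flushes any partial tail. Below is a self-contained sketch of that batching pattern; the message size and types are made up for illustration.

#include <stdio.h>
#include <string.h>

#define ITEMS_PER_MSG 4

struct item { unsigned long ino; };
struct msg { int num; struct item items[ITEMS_PER_MSG]; };

static void send_msg(struct msg *m)
{
    printf("sending batch of %d releases\n", m->num);
    m->num = 0;                     /* reset for reuse */
}

static void queue_release(struct msg *m, unsigned long ino)
{
    m->items[m->num++].ino = ino;
    if (m->num == ITEMS_PER_MSG)    /* batch full: flush now */
        send_msg(m);
}

int main(void)
{
    struct msg m = { 0 };
    for (unsigned long ino = 1; ino <= 10; ino++)
        queue_release(&m, ino);
    if (m.num)                      /* flush the partial tail */
        send_msg(&m);
    return 0;
}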
2f2dc053 SW |
1666 | /* |
1667 | * requests | |
1668 | */ | |
1669 | ||
54008399 YZ |
1670 | int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, |
1671 | struct inode *dir) | |
1672 | { | |
1673 | struct ceph_inode_info *ci = ceph_inode(dir); | |
1674 | struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; | |
1675 | struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; | |
1676 | size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) + | |
1677 | sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease); | |
1678 | int order, num_entries; | |
1679 | ||
1680 | spin_lock(&ci->i_ceph_lock); | |
1681 | num_entries = ci->i_files + ci->i_subdirs; | |
1682 | spin_unlock(&ci->i_ceph_lock); | |
1683 | num_entries = max(num_entries, 1); | |
1684 | num_entries = min(num_entries, opt->max_readdir); | |
1685 | ||
1686 | order = get_order(size * num_entries); | |
1687 | while (order >= 0) { | |
687265e5 YZ |
1688 | rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL | |
1689 | __GFP_NOWARN, | |
54008399 YZ |
1690 | order); |
1691 | if (rinfo->dir_in) | |
1692 | break; | |
1693 | order--; | |
1694 | } | |
1695 | if (!rinfo->dir_in) | |
1696 | return -ENOMEM; | |
1697 | ||
1698 | num_entries = (PAGE_SIZE << order) / size; | |
1699 | num_entries = min(num_entries, opt->max_readdir); | |
1700 | ||
1701 | rinfo->dir_buf_size = PAGE_SIZE << order; | |
1702 | req->r_num_caps = num_entries + 1; | |
1703 | req->r_args.readdir.max_entries = cpu_to_le32(num_entries); | |
1704 | req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); | |
1705 | return 0; | |
1706 | } | |
1707 | ||
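The order loop above asks for the smallest power-of-two page allocation that covers the expected entry count, retries with smaller orders if allocation fails, then recomputes how many entries actually fit in what it got. A userspace approximation using malloc; PAGE_SIZE and the helper name here are stand-ins, not the kernel API.

#include <stdlib.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static void *alloc_entries(size_t entry_size, int want, int *got)
{
    size_t need = entry_size * (size_t)want;
    int order = 0;
    while (((size_t)PAGE_SIZE << order) < need)
        order++;                    /* smallest order that fits */
    for (; order >= 0; order--) {
        void *buf = malloc((size_t)PAGE_SIZE << order);
        if (buf) {
            /* a smaller allocation may hold fewer entries */
            *got = (int)(((size_t)PAGE_SIZE << order) / entry_size);
            if (*got > want)
                *got = want;
            return buf;
        }
    }
    return NULL;
}

int main(void)
{
    int got;
    void *buf = alloc_entries(64, 1000, &got);
    if (buf) {
        printf("room for %d entries\n", got);
        free(buf);
    }
    return 0;
}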
2f2dc053 SW |
1708 | /* |
1709 | * Create an mds request. | |
1710 | */ | |
1711 | struct ceph_mds_request * | |
1712 | ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) | |
1713 | { | |
1714 | struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); | |
1715 | ||
1716 | if (!req) | |
1717 | return ERR_PTR(-ENOMEM); | |
1718 | ||
b4556396 | 1719 | mutex_init(&req->r_fill_mutex); |
37151668 | 1720 | req->r_mdsc = mdsc; |
2f2dc053 SW |
1721 | req->r_started = jiffies; |
1722 | req->r_resend_mds = -1; | |
1723 | INIT_LIST_HEAD(&req->r_unsafe_dir_item); | |
68cd5b4b | 1724 | INIT_LIST_HEAD(&req->r_unsafe_target_item); |
2f2dc053 | 1725 | req->r_fmode = -1; |
153c8e6b | 1726 | kref_init(&req->r_kref); |
2f2dc053 SW |
1727 | INIT_LIST_HEAD(&req->r_wait); |
1728 | init_completion(&req->r_completion); | |
1729 | init_completion(&req->r_safe_completion); | |
1730 | INIT_LIST_HEAD(&req->r_unsafe_item); | |
1731 | ||
b8e69066 SW |
1732 | req->r_stamp = CURRENT_TIME; |
1733 | ||
2f2dc053 SW |
1734 | req->r_op = op; |
1735 | req->r_direct_mode = mode; | |
1736 | return req; | |
1737 | } | |
1738 | ||
1739 | /* | |
44ca18f2 | 1740 | * return the oldest (lowest tid) request in the request tree, or NULL if none. |
2f2dc053 SW |
1741 | * |
1742 | * called under mdsc->mutex. | |
1743 | */ | |
44ca18f2 SW |
1744 | static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) |
1745 | { | |
1746 | if (RB_EMPTY_ROOT(&mdsc->request_tree)) | |
1747 | return NULL; | |
1748 | return rb_entry(rb_first(&mdsc->request_tree), | |
1749 | struct ceph_mds_request, r_node); | |
1750 | } | |
1751 | ||
e8a7b8b1 | 1752 | static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) |
2f2dc053 | 1753 | { |
e8a7b8b1 | 1754 | return mdsc->oldest_tid; |
2f2dc053 SW |
1755 | } |
1756 | ||
1757 | /* | |
1758 | * Build a dentry's path. Allocate on heap; caller must kfree. Based | |
1759 | * on build_path_from_dentry in fs/cifs/dir.c. | |
1760 | * | |
1761 | * If @stop_on_nosnap, generate path relative to the first non-snapped | |
1762 | * inode. | |
1763 | * | |
1764 | * Encode hidden .snap dirs as a double /, i.e. | |
1765 | * foo/.snap/bar -> foo//bar | |
1766 | */ | |
1767 | char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | |
1768 | int stop_on_nosnap) | |
1769 | { | |
1770 | struct dentry *temp; | |
1771 | char *path; | |
1772 | int len, pos; | |
1b71fe2e | 1773 | unsigned seq; |
2f2dc053 SW |
1774 | |
1775 | if (dentry == NULL) | |
1776 | return ERR_PTR(-EINVAL); | |
1777 | ||
1778 | retry: | |
1779 | len = 0; | |
1b71fe2e AV |
1780 | seq = read_seqbegin(&rename_lock); |
1781 | rcu_read_lock(); | |
2f2dc053 | 1782 | for (temp = dentry; !IS_ROOT(temp);) { |
2b0143b5 | 1783 | struct inode *inode = d_inode(temp); |
2f2dc053 SW |
1784 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) |
1785 | len++; /* slash only */ | |
1786 | else if (stop_on_nosnap && inode && | |
1787 | ceph_snap(inode) == CEPH_NOSNAP) | |
1788 | break; | |
1789 | else | |
1790 | len += 1 + temp->d_name.len; | |
1791 | temp = temp->d_parent; | |
2f2dc053 | 1792 | } |
1b71fe2e | 1793 | rcu_read_unlock(); |
2f2dc053 SW |
1794 | if (len) |
1795 | len--; /* no leading '/' */ | |
1796 | ||
1797 | path = kmalloc(len+1, GFP_NOFS); | |
1798 | if (path == NULL) | |
1799 | return ERR_PTR(-ENOMEM); | |
1800 | pos = len; | |
1801 | path[pos] = 0; /* trailing null */ | |
1b71fe2e | 1802 | rcu_read_lock(); |
2f2dc053 | 1803 | for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { |
1b71fe2e | 1804 | struct inode *inode; |
2f2dc053 | 1805 | |
1b71fe2e | 1806 | spin_lock(&temp->d_lock); |
2b0143b5 | 1807 | inode = d_inode(temp); |
2f2dc053 | 1808 | if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { |
104648ad | 1809 | dout("build_path path+%d: %p SNAPDIR\n", |
2f2dc053 SW |
1810 | pos, temp); |
1811 | } else if (stop_on_nosnap && inode && | |
1812 | ceph_snap(inode) == CEPH_NOSNAP) { | |
9d5a09e6 | 1813 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1814 | break; |
1815 | } else { | |
1816 | pos -= temp->d_name.len; | |
1b71fe2e AV |
1817 | if (pos < 0) { |
1818 | spin_unlock(&temp->d_lock); | |
2f2dc053 | 1819 | break; |
1b71fe2e | 1820 | } |
2f2dc053 SW |
1821 | strncpy(path + pos, temp->d_name.name, |
1822 | temp->d_name.len); | |
2f2dc053 | 1823 | } |
1b71fe2e | 1824 | spin_unlock(&temp->d_lock); |
2f2dc053 SW |
1825 | if (pos) |
1826 | path[--pos] = '/'; | |
1827 | temp = temp->d_parent; | |
2f2dc053 | 1828 | } |
1b71fe2e AV |
1829 | rcu_read_unlock(); |
1830 | if (pos != 0 || read_seqretry(&rename_lock, seq)) { | |
104648ad | 1831 | pr_err("build_path did not end path lookup where " |
2f2dc053 SW |
1832 | "expected, namelen is %d, pos is %d\n", len, pos); |
1833 | /* presumably this is only possible if racing with a | |
1834 | rename of one of the parent directories (we cannot |
1835 | lock the dentries above us to prevent this, but | |
1836 | retrying should be harmless) */ | |
1837 | kfree(path); | |
1838 | goto retry; | |
1839 | } | |
1840 | ||
2b0143b5 | 1841 | *base = ceph_ino(d_inode(temp)); |
2f2dc053 | 1842 | *plen = len; |
104648ad | 1843 | dout("build_path on %p %d built %llx '%.*s'\n", |
84d08fa8 | 1844 | dentry, d_count(dentry), *base, len, path); |
2f2dc053 SW |
1845 | return path; |
1846 | } | |
1847 | ||
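ceph_mdsc_build_path() measures the path first, then fills the buffer right to left from the leaf dentry toward the root, inserting '/' between components. The sketch below reproduces just that backwards-assembly idea over a plain string array; it deliberately ignores the seqlock retry and snapdir double-slash handling, and all names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_path(const char **comp, int n)
{
    int len = 0, pos, i;
    char *path;

    for (i = 0; i < n; i++)
        len += 1 + (int)strlen(comp[i]);    /* '/' + name */
    len--;                                  /* no leading '/' */

    path = malloc(len + 1);
    if (!path)
        return NULL;
    pos = len;
    path[pos] = '\0';
    for (i = n - 1; i >= 0; i--) {          /* leaf back toward root */
        int l = (int)strlen(comp[i]);
        pos -= l;
        memcpy(path + pos, comp[i], l);
        if (pos)
            path[--pos] = '/';
    }
    return path;
}

int main(void)
{
    const char *comp[] = { "foo", "bar", "baz" };   /* root..leaf */
    char *p = build_path(comp, 3);
    if (p) {
        printf("%s\n", p);                  /* foo/bar/baz */
        free(p);
    }
    return 0;
}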
1848 | static int build_dentry_path(struct dentry *dentry, | |
1849 | const char **ppath, int *ppathlen, u64 *pino, | |
1850 | int *pfreepath) | |
1851 | { | |
1852 | char *path; | |
1853 | ||
2b0143b5 DH |
1854 | if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) { |
1855 | *pino = ceph_ino(d_inode(dentry->d_parent)); | |
2f2dc053 SW |
1856 | *ppath = dentry->d_name.name; |
1857 | *ppathlen = dentry->d_name.len; | |
1858 | return 0; | |
1859 | } | |
1860 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1861 | if (IS_ERR(path)) | |
1862 | return PTR_ERR(path); | |
1863 | *ppath = path; | |
1864 | *pfreepath = 1; | |
1865 | return 0; | |
1866 | } | |
1867 | ||
1868 | static int build_inode_path(struct inode *inode, | |
1869 | const char **ppath, int *ppathlen, u64 *pino, | |
1870 | int *pfreepath) | |
1871 | { | |
1872 | struct dentry *dentry; | |
1873 | char *path; | |
1874 | ||
1875 | if (ceph_snap(inode) == CEPH_NOSNAP) { | |
1876 | *pino = ceph_ino(inode); | |
1877 | *ppathlen = 0; | |
1878 | return 0; | |
1879 | } | |
1880 | dentry = d_find_alias(inode); | |
1881 | path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); | |
1882 | dput(dentry); | |
1883 | if (IS_ERR(path)) | |
1884 | return PTR_ERR(path); | |
1885 | *ppath = path; | |
1886 | *pfreepath = 1; | |
1887 | return 0; | |
1888 | } | |
1889 | ||
1890 | /* | |
1891 | * request arguments may be specified via an inode *, a dentry *, or | |
1892 | * an explicit ino+path. | |
1893 | */ | |
1894 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | |
1895 | const char *rpath, u64 rino, | |
1896 | const char **ppath, int *pathlen, | |
1897 | u64 *ino, int *freepath) | |
1898 | { | |
1899 | int r = 0; | |
1900 | ||
1901 | if (rinode) { | |
1902 | r = build_inode_path(rinode, ppath, pathlen, ino, freepath); | |
1903 | dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), | |
1904 | ceph_snap(rinode)); | |
1905 | } else if (rdentry) { | |
1906 | r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); | |
1907 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, | |
1908 | *ppath); | |
795858db | 1909 | } else if (rpath || rino) { |
2f2dc053 SW |
1910 | *ino = rino; |
1911 | *ppath = rpath; | |
b000056a | 1912 | *pathlen = rpath ? strlen(rpath) : 0; |
2f2dc053 SW |
1913 | dout(" path %.*s\n", *pathlen, rpath); |
1914 | } | |
1915 | ||
1916 | return r; | |
1917 | } | |
1918 | ||
1919 | /* | |
1920 | * called under mdsc->mutex | |
1921 | */ | |
1922 | static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | |
1923 | struct ceph_mds_request *req, | |
6e6f0923 | 1924 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
1925 | { |
1926 | struct ceph_msg *msg; | |
1927 | struct ceph_mds_request_head *head; | |
1928 | const char *path1 = NULL; | |
1929 | const char *path2 = NULL; | |
1930 | u64 ino1 = 0, ino2 = 0; | |
1931 | int pathlen1 = 0, pathlen2 = 0; | |
1932 | int freepath1 = 0, freepath2 = 0; | |
1933 | int len; | |
1934 | u16 releases; | |
1935 | void *p, *end; | |
1936 | int ret; | |
1937 | ||
1938 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | |
1939 | req->r_path1, req->r_ino1.ino, | |
1940 | &path1, &pathlen1, &ino1, &freepath1); | |
1941 | if (ret < 0) { | |
1942 | msg = ERR_PTR(ret); | |
1943 | goto out; | |
1944 | } | |
1945 | ||
1946 | ret = set_request_path_attr(NULL, req->r_old_dentry, | |
1947 | req->r_path2, req->r_ino2.ino, | |
1948 | &path2, &pathlen2, &ino2, &freepath2); | |
1949 | if (ret < 0) { | |
1950 | msg = ERR_PTR(ret); | |
1951 | goto out_free1; | |
1952 | } | |
1953 | ||
1954 | len = sizeof(*head) + | |
b8e69066 | 1955 | pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + |
777d738a | 1956 | sizeof(struct ceph_timespec); |
2f2dc053 SW |
1957 | |
1958 | /* calculate (max) length for cap releases */ | |
1959 | len += sizeof(struct ceph_mds_request_release) * | |
1960 | (!!req->r_inode_drop + !!req->r_dentry_drop + | |
1961 | !!req->r_old_inode_drop + !!req->r_old_dentry_drop); | |
1962 | if (req->r_dentry_drop) | |
1963 | len += req->r_dentry->d_name.len; | |
1964 | if (req->r_old_dentry_drop) | |
1965 | len += req->r_old_dentry->d_name.len; | |
1966 | ||
b61c2763 | 1967 | msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); |
a79832f2 SW |
1968 | if (!msg) { |
1969 | msg = ERR_PTR(-ENOMEM); | |
2f2dc053 | 1970 | goto out_free2; |
a79832f2 | 1971 | } |
2f2dc053 | 1972 | |
7cfa0313 | 1973 | msg->hdr.version = cpu_to_le16(2); |
6df058c0 SW |
1974 | msg->hdr.tid = cpu_to_le64(req->r_tid); |
1975 | ||
2f2dc053 SW |
1976 | head = msg->front.iov_base; |
1977 | p = msg->front.iov_base + sizeof(*head); | |
1978 | end = msg->front.iov_base + msg->front.iov_len; | |
1979 | ||
1980 | head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); | |
1981 | head->op = cpu_to_le32(req->r_op); | |
ff3d0046 EB |
1982 | head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); |
1983 | head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); | |
2f2dc053 SW |
1984 | head->args = req->r_args; |
1985 | ||
1986 | ceph_encode_filepath(&p, end, ino1, path1); | |
1987 | ceph_encode_filepath(&p, end, ino2, path2); | |
1988 | ||
e979cf50 SW |
1989 | /* make note of release offset, in case we need to replay */ |
1990 | req->r_request_release_offset = p - msg->front.iov_base; | |
1991 | ||
2f2dc053 SW |
1992 | /* cap releases */ |
1993 | releases = 0; | |
1994 | if (req->r_inode_drop) | |
1995 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 1996 | req->r_inode ? req->r_inode : d_inode(req->r_dentry), |
2f2dc053 SW |
1997 | mds, req->r_inode_drop, req->r_inode_unless, 0); |
1998 | if (req->r_dentry_drop) | |
1999 | releases += ceph_encode_dentry_release(&p, req->r_dentry, | |
2000 | mds, req->r_dentry_drop, req->r_dentry_unless); | |
2001 | if (req->r_old_dentry_drop) | |
2002 | releases += ceph_encode_dentry_release(&p, req->r_old_dentry, | |
2003 | mds, req->r_old_dentry_drop, req->r_old_dentry_unless); | |
2004 | if (req->r_old_inode_drop) | |
2005 | releases += ceph_encode_inode_release(&p, | |
2b0143b5 | 2006 | d_inode(req->r_old_dentry), |
2f2dc053 | 2007 | mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); |
6e6f0923 YZ |
2008 | |
2009 | if (drop_cap_releases) { | |
2010 | releases = 0; | |
2011 | p = msg->front.iov_base + req->r_request_release_offset; | |
2012 | } | |
2013 | ||
2f2dc053 SW |
2014 | head->num_releases = cpu_to_le16(releases); |
2015 | ||
b8e69066 | 2016 | /* time stamp */ |
1f041a89 YZ |
2017 | { |
2018 | struct ceph_timespec ts; | |
2019 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2020 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2021 | } | |
b8e69066 | 2022 | |
2f2dc053 SW |
2023 | BUG_ON(p > end); |
2024 | msg->front.iov_len = p - msg->front.iov_base; | |
2025 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
2026 | ||
25e6bae3 YZ |
2027 | if (req->r_pagelist) { |
2028 | struct ceph_pagelist *pagelist = req->r_pagelist; | |
2029 | atomic_inc(&pagelist->refcnt); | |
2030 | ceph_msg_data_add_pagelist(msg, pagelist); | |
2031 | msg->hdr.data_len = cpu_to_le32(pagelist->length); | |
2032 | } else { | |
2033 | msg->hdr.data_len = 0; | |
ebf18f47 | 2034 | } |
02afca6c | 2035 | |
2f2dc053 SW |
2036 | msg->hdr.data_off = cpu_to_le16(0); |
2037 | ||
2038 | out_free2: | |
2039 | if (freepath2) | |
2040 | kfree((char *)path2); | |
2041 | out_free1: | |
2042 | if (freepath1) | |
2043 | kfree((char *)path1); | |
2044 | out: | |
2045 | return msg; | |
2046 | } | |
2047 | ||
2048 | /* | |
2049 | * called under mdsc->mutex if error, under no mutex if | |
2050 | * success. | |
2051 | */ | |
2052 | static void complete_request(struct ceph_mds_client *mdsc, | |
2053 | struct ceph_mds_request *req) | |
2054 | { | |
2055 | if (req->r_callback) | |
2056 | req->r_callback(mdsc, req); | |
2057 | else | |
03066f23 | 2058 | complete_all(&req->r_completion); |
2f2dc053 SW |
2059 | } |
2060 | ||
2061 | /* | |
2062 | * called under mdsc->mutex | |
2063 | */ | |
2064 | static int __prepare_send_request(struct ceph_mds_client *mdsc, | |
2065 | struct ceph_mds_request *req, | |
6e6f0923 | 2066 | int mds, bool drop_cap_releases) |
2f2dc053 SW |
2067 | { |
2068 | struct ceph_mds_request_head *rhead; | |
2069 | struct ceph_msg *msg; | |
2070 | int flags = 0; | |
2071 | ||
2f2dc053 | 2072 | req->r_attempts++; |
e55b71f8 GF |
2073 | if (req->r_inode) { |
2074 | struct ceph_cap *cap = | |
2075 | ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); | |
2076 | ||
2077 | if (cap) | |
2078 | req->r_sent_on_mseq = cap->mseq; | |
2079 | else | |
2080 | req->r_sent_on_mseq = -1; | |
2081 | } | |
2f2dc053 SW |
2082 | dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, |
2083 | req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); | |
2084 | ||
01a92f17 | 2085 | if (req->r_got_unsafe) { |
c5c9a0bf | 2086 | void *p; |
01a92f17 SW |
2087 | /* |
2088 | * Replay. Do not regenerate message (and rebuild | |
2089 | * paths, etc.); just use the original message. | |
2090 | * Rebuilding paths will break for renames because | |
2091 | * d_move mangles the src name. | |
2092 | */ | |
2093 | msg = req->r_request; | |
2094 | rhead = msg->front.iov_base; | |
2095 | ||
2096 | flags = le32_to_cpu(rhead->flags); | |
2097 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2098 | rhead->flags = cpu_to_le32(flags); | |
2099 | ||
2100 | if (req->r_target_inode) | |
2101 | rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); | |
2102 | ||
2103 | rhead->num_retry = req->r_attempts - 1; | |
e979cf50 SW |
2104 | |
2105 | /* remove cap/dentry releases from message */ | |
2106 | rhead->num_releases = 0; | |
c5c9a0bf YZ |
2107 | |
2108 | /* time stamp */ | |
2109 | p = msg->front.iov_base + req->r_request_release_offset; | |
1f041a89 YZ |
2110 | { |
2111 | struct ceph_timespec ts; | |
2112 | ceph_encode_timespec(&ts, &req->r_stamp); | |
2113 | ceph_encode_copy(&p, &ts, sizeof(ts)); | |
2114 | } | |
c5c9a0bf YZ |
2115 | |
2116 | msg->front.iov_len = p - msg->front.iov_base; | |
2117 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
01a92f17 SW |
2118 | return 0; |
2119 | } | |
2120 | ||
2f2dc053 SW |
2121 | if (req->r_request) { |
2122 | ceph_msg_put(req->r_request); | |
2123 | req->r_request = NULL; | |
2124 | } | |
6e6f0923 | 2125 | msg = create_request_message(mdsc, req, mds, drop_cap_releases); |
2f2dc053 | 2126 | if (IS_ERR(msg)) { |
e1518c7c | 2127 | req->r_err = PTR_ERR(msg); |
a79832f2 | 2128 | return PTR_ERR(msg); |
2f2dc053 SW |
2129 | } |
2130 | req->r_request = msg; | |
2131 | ||
2132 | rhead = msg->front.iov_base; | |
2f2dc053 SW |
2133 | rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); |
2134 | if (req->r_got_unsafe) | |
2135 | flags |= CEPH_MDS_FLAG_REPLAY; | |
2136 | if (req->r_locked_dir) | |
2137 | flags |= CEPH_MDS_FLAG_WANT_DENTRY; | |
2138 | rhead->flags = cpu_to_le32(flags); | |
2139 | rhead->num_fwd = req->r_num_fwd; | |
2140 | rhead->num_retry = req->r_attempts - 1; | |
01a92f17 | 2141 | rhead->ino = 0; |
2f2dc053 SW |
2142 | |
2143 | dout(" r_locked_dir = %p\n", req->r_locked_dir); | |
2f2dc053 SW |
2144 | return 0; |
2145 | } | |
2146 | ||
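On replay the code deliberately reuses the already-encoded request rather than regenerating it, only patching the header: OR in CEPH_MDS_FLAG_REPLAY and refresh the retry count. A toy version of that in-place header patch; the flag value and struct layout are invented, not the wire format.

#include <stdint.h>
#include <stdio.h>

#define FLAG_REPLAY 0x1u

struct req_head {
    uint32_t flags;
    uint32_t num_retry;
};

static void mark_replay(struct req_head *h, int attempts)
{
    h->flags |= FLAG_REPLAY;        /* tell the server this is a resend */
    h->num_retry = attempts - 1;    /* the first send was not a retry */
}

int main(void)
{
    struct req_head h = { 0, 0 };
    mark_replay(&h, 3);
    printf("flags=%#x retries=%u\n", h.flags, h.num_retry);
    return 0;
}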
2147 | /* | |
2148 | * send request, or put it on the appropriate wait list. | |
2149 | */ | |
2150 | static int __do_request(struct ceph_mds_client *mdsc, | |
2151 | struct ceph_mds_request *req) | |
2152 | { | |
2153 | struct ceph_mds_session *session = NULL; | |
2154 | int mds = -1; | |
48fec5d0 | 2155 | int err = 0; |
2f2dc053 | 2156 | |
eb1b8af3 YZ |
2157 | if (req->r_err || req->r_got_result) { |
2158 | if (req->r_aborted) | |
2159 | __unregister_request(mdsc, req); | |
2f2dc053 | 2160 | goto out; |
eb1b8af3 | 2161 | } |
2f2dc053 SW |
2162 | |
2163 | if (req->r_timeout && | |
2164 | time_after_eq(jiffies, req->r_started + req->r_timeout)) { | |
2165 | dout("do_request timed out\n"); | |
2166 | err = -EIO; | |
2167 | goto finish; | |
2168 | } | |
48fec5d0 YZ |
2169 | if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { |
2170 | dout("do_request forced umount\n"); | |
2171 | err = -EIO; | |
2172 | goto finish; | |
2173 | } | |
2f2dc053 | 2174 | |
dc69e2e9 SW |
2175 | put_request_session(req); |
2176 | ||
2f2dc053 SW |
2177 | mds = __choose_mds(mdsc, req); |
2178 | if (mds < 0 || | |
2179 | ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { | |
2180 | dout("do_request no mds or not active, waiting for map\n"); | |
2181 | list_add(&req->r_wait, &mdsc->waiting_for_map); | |
2182 | goto out; | |
2183 | } | |
2184 | ||
2185 | /* get, open session */ | |
2186 | session = __ceph_lookup_mds_session(mdsc, mds); | |
9c423956 | 2187 | if (!session) { |
2f2dc053 | 2188 | session = register_session(mdsc, mds); |
9c423956 SW |
2189 | if (IS_ERR(session)) { |
2190 | err = PTR_ERR(session); | |
2191 | goto finish; | |
2192 | } | |
2193 | } | |
dc69e2e9 SW |
2194 | req->r_session = get_session(session); |
2195 | ||
2f2dc053 | 2196 | dout("do_request mds%d session %p state %s\n", mds, session, |
a687ecaf | 2197 | ceph_session_state_name(session->s_state)); |
2f2dc053 SW |
2198 | if (session->s_state != CEPH_MDS_SESSION_OPEN && |
2199 | session->s_state != CEPH_MDS_SESSION_HUNG) { | |
2200 | if (session->s_state == CEPH_MDS_SESSION_NEW || | |
2201 | session->s_state == CEPH_MDS_SESSION_CLOSING) | |
2202 | __open_session(mdsc, session); | |
2203 | list_add(&req->r_wait, &session->s_waiting); | |
2204 | goto out_session; | |
2205 | } | |
2206 | ||
2207 | /* send request */ | |
2f2dc053 SW |
2208 | req->r_resend_mds = -1; /* forget any previous mds hint */ |
2209 | ||
2210 | if (req->r_request_started == 0) /* note request start time */ | |
2211 | req->r_request_started = jiffies; | |
2212 | ||
6e6f0923 | 2213 | err = __prepare_send_request(mdsc, req, mds, false); |
2f2dc053 SW |
2214 | if (!err) { |
2215 | ceph_msg_get(req->r_request); | |
2216 | ceph_con_send(&session->s_con, req->r_request); | |
2217 | } | |
2218 | ||
2219 | out_session: | |
2220 | ceph_put_mds_session(session); | |
48fec5d0 YZ |
2221 | finish: |
2222 | if (err) { | |
2223 | dout("__do_request early error %d\n", err); | |
2224 | req->r_err = err; | |
2225 | complete_request(mdsc, req); | |
2226 | __unregister_request(mdsc, req); | |
2227 | } | |
2f2dc053 SW |
2228 | out: |
2229 | return err; | |
2f2dc053 SW |
2230 | } |
2231 | ||
2232 | /* | |
2233 | * called under mdsc->mutex | |
2234 | */ | |
2235 | static void __wake_requests(struct ceph_mds_client *mdsc, | |
2236 | struct list_head *head) | |
2237 | { | |
ed75ec2c YZ |
2238 | struct ceph_mds_request *req; |
2239 | LIST_HEAD(tmp_list); | |
2240 | ||
2241 | list_splice_init(head, &tmp_list); | |
2f2dc053 | 2242 | |
ed75ec2c YZ |
2243 | while (!list_empty(&tmp_list)) { |
2244 | req = list_entry(tmp_list.next, | |
2245 | struct ceph_mds_request, r_wait); | |
2f2dc053 | 2246 | list_del_init(&req->r_wait); |
7971bd92 | 2247 | dout(" wake request %p tid %llu\n", req, req->r_tid); |
2f2dc053 SW |
2248 | __do_request(mdsc, req); |
2249 | } | |
2250 | } | |
2251 | ||
2252 | /* | |
2253 | * Wake up threads with requests pending for @mds, so that they can | |
29790f26 | 2254 | * resubmit their requests to a possibly different mds. |
2f2dc053 | 2255 | */ |
29790f26 | 2256 | static void kick_requests(struct ceph_mds_client *mdsc, int mds) |
2f2dc053 | 2257 | { |
44ca18f2 | 2258 | struct ceph_mds_request *req; |
282c1052 | 2259 | struct rb_node *p = rb_first(&mdsc->request_tree); |
2f2dc053 SW |
2260 | |
2261 | dout("kick_requests mds%d\n", mds); | |
282c1052 | 2262 | while (p) { |
44ca18f2 | 2263 | req = rb_entry(p, struct ceph_mds_request, r_node); |
282c1052 | 2264 | p = rb_next(p); |
44ca18f2 SW |
2265 | if (req->r_got_unsafe) |
2266 | continue; | |
3de22be6 YZ |
2267 | if (req->r_attempts > 0) |
2268 | continue; /* only new requests */ | |
44ca18f2 SW |
2269 | if (req->r_session && |
2270 | req->r_session->s_mds == mds) { | |
2271 | dout(" kicking tid %llu\n", req->r_tid); | |
03974e81 | 2272 | list_del_init(&req->r_wait); |
44ca18f2 | 2273 | __do_request(mdsc, req); |
2f2dc053 SW |
2274 | } |
2275 | } | |
2276 | } | |
2277 | ||
2278 | void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, | |
2279 | struct ceph_mds_request *req) | |
2280 | { | |
2281 | dout("submit_request on %p\n", req); | |
2282 | mutex_lock(&mdsc->mutex); | |
2283 | __register_request(mdsc, req, NULL); | |
2284 | __do_request(mdsc, req); | |
2285 | mutex_unlock(&mdsc->mutex); | |
2286 | } | |
2287 | ||
2288 | /* | |
2289 | * Synchronously perform an mds request. Takes care of all of the
2290 | * session setup, forwarding, and retry details. | |
2291 | */ | |
2292 | int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, | |
2293 | struct inode *dir, | |
2294 | struct ceph_mds_request *req) | |
2295 | { | |
2296 | int err; | |
2297 | ||
2298 | dout("do_request on %p\n", req); | |
2299 | ||
2300 | /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ | |
2301 | if (req->r_inode) | |
2302 | ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); | |
2303 | if (req->r_locked_dir) | |
2304 | ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); | |
844d87c3 | 2305 | if (req->r_old_dentry_dir) |
41b02e1f SW |
2306 | ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), |
2307 | CEPH_CAP_PIN); | |
2f2dc053 | 2308 | |
5ea5c5e0 YZ |
2309 | /* deny access to directories with pool_ns layouts */ |
2310 | if (req->r_inode && S_ISDIR(req->r_inode->i_mode) && | |
2311 | ceph_inode(req->r_inode)->i_pool_ns_len) | |
2312 | return -EIO; | |
2313 | if (req->r_locked_dir && | |
2314 | ceph_inode(req->r_locked_dir)->i_pool_ns_len) | |
2315 | return -EIO; | |
2316 | ||
2f2dc053 SW |
2317 | /* issue */ |
2318 | mutex_lock(&mdsc->mutex); | |
2319 | __register_request(mdsc, req, dir); | |
2320 | __do_request(mdsc, req); | |
2321 | ||
e1518c7c SW |
2322 | if (req->r_err) { |
2323 | err = req->r_err; | |
e1518c7c | 2324 | goto out; |
2f2dc053 SW |
2325 | } |
2326 | ||
e1518c7c SW |
2327 | /* wait */ |
2328 | mutex_unlock(&mdsc->mutex); | |
2329 | dout("do_request waiting\n"); | |
5be73034 | 2330 | if (!req->r_timeout && req->r_wait_for_completion) { |
9280be24 | 2331 | err = req->r_wait_for_completion(mdsc, req); |
e1518c7c | 2332 | } else { |
5be73034 ID |
2333 | long timeleft = wait_for_completion_killable_timeout( |
2334 | &req->r_completion, | |
2335 | ceph_timeout_jiffies(req->r_timeout)); | |
2336 | if (timeleft > 0) | |
2337 | err = 0; | |
2338 | else if (!timeleft) | |
2339 | err = -EIO; /* timed out */ | |
2340 | else | |
2341 | err = timeleft; /* killed */ | |
e1518c7c SW |
2342 | } |
2343 | dout("do_request waited, got %d\n", err); | |
2344 | mutex_lock(&mdsc->mutex); | |
5b1daecd | 2345 | |
e1518c7c SW |
2346 | /* only abort if we didn't race with a real reply */ |
2347 | if (req->r_got_result) { | |
2348 | err = le32_to_cpu(req->r_reply_info.head->result); | |
2349 | } else if (err < 0) { | |
2350 | dout("aborted request %lld with %d\n", req->r_tid, err); | |
b4556396 SW |
2351 | |
2352 | /* | |
2353 | * ensure we aren't running concurrently with | |
2354 | * ceph_fill_trace or ceph_readdir_prepopulate, which | |
2355 | * rely on locks (dir mutex) held by our caller. | |
2356 | */ | |
2357 | mutex_lock(&req->r_fill_mutex); | |
e1518c7c SW |
2358 | req->r_err = err; |
2359 | req->r_aborted = true; | |
b4556396 | 2360 | mutex_unlock(&req->r_fill_mutex); |
5b1daecd | 2361 | |
e1518c7c | 2362 | if (req->r_locked_dir && |
167c9e35 SW |
2363 | (req->r_op & CEPH_MDS_OP_WRITE)) |
2364 | ceph_invalidate_dir_request(req); | |
2f2dc053 | 2365 | } else { |
e1518c7c | 2366 | err = req->r_err; |
2f2dc053 | 2367 | } |
2f2dc053 | 2368 | |
e1518c7c SW |
2369 | out: |
2370 | mutex_unlock(&mdsc->mutex); | |
2f2dc053 SW |
2371 | dout("do_request %p done, result %d\n", req, err); |
2372 | return err; | |
2373 | } | |
2374 | ||
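The timeout branch above folds wait_for_completion_killable_timeout()'s three possible outcomes into a single error code: positive means the completion fired in time, zero means timeout, negative means the wait was interrupted. The mapping is small enough to show directly; this is plain C, not the kernel helper.

#include <errno.h>
#include <stdio.h>

static int map_wait_result(long timeleft)
{
    if (timeleft > 0)
        return 0;               /* completed in time */
    if (timeleft == 0)
        return -EIO;            /* timed out */
    return (int)timeleft;       /* killed: propagate the negative errno */
}

int main(void)
{
    printf("%d %d %d\n",
           map_wait_result(5), map_wait_result(0), map_wait_result(-4));
    return 0;
}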
167c9e35 | 2375 | /* |
2f276c51 | 2376 | * Invalidate dir's completeness, dentry lease state on an aborted MDS |
167c9e35 SW |
2377 | * namespace request. |
2378 | */ | |
2379 | void ceph_invalidate_dir_request(struct ceph_mds_request *req) | |
2380 | { | |
2381 | struct inode *inode = req->r_locked_dir; | |
167c9e35 | 2382 | |
2f276c51 | 2383 | dout("invalidate_dir_request %p (complete, lease(s))\n", inode); |
167c9e35 | 2384 | |
2f276c51 | 2385 | ceph_dir_clear_complete(inode); |
167c9e35 SW |
2386 | if (req->r_dentry) |
2387 | ceph_invalidate_dentry_lease(req->r_dentry); | |
2388 | if (req->r_old_dentry) | |
2389 | ceph_invalidate_dentry_lease(req->r_old_dentry); | |
2390 | } | |
2391 | ||
2f2dc053 SW |
2392 | /* |
2393 | * Handle mds reply. | |
2394 | * | |
2395 | * We take the session mutex and parse and process the reply immediately. | |
2396 | * This preserves the logical ordering of replies, capabilities, etc., sent | |
2397 | * by the MDS as they are applied to our local cache. | |
2398 | */ | |
2399 | static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) | |
2400 | { | |
2401 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2402 | struct ceph_mds_request *req; | |
2403 | struct ceph_mds_reply_head *head = msg->front.iov_base; | |
2404 | struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ | |
982d6011 | 2405 | struct ceph_snap_realm *realm; |
2f2dc053 SW |
2406 | u64 tid; |
2407 | int err, result; | |
2600d2dd | 2408 | int mds = session->s_mds; |
2f2dc053 | 2409 | |
2f2dc053 SW |
2410 | if (msg->front.iov_len < sizeof(*head)) { |
2411 | pr_err("mdsc_handle_reply got corrupt (short) reply\n"); | |
9ec7cab1 | 2412 | ceph_msg_dump(msg); |
2f2dc053 SW |
2413 | return; |
2414 | } | |
2415 | ||
2416 | /* get request, session */ | |
6df058c0 | 2417 | tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2418 | mutex_lock(&mdsc->mutex); |
2419 | req = __lookup_request(mdsc, tid); | |
2420 | if (!req) { | |
2421 | dout("handle_reply on unknown tid %llu\n", tid); | |
2422 | mutex_unlock(&mdsc->mutex); | |
2423 | return; | |
2424 | } | |
2425 | dout("handle_reply %p\n", req); | |
2f2dc053 SW |
2426 | |
2427 | /* correct session? */ | |
d96d6049 | 2428 | if (req->r_session != session) { |
2f2dc053 SW |
2429 | pr_err("mdsc_handle_reply got %llu on session mds%d" |
2430 | " not mds%d\n", tid, session->s_mds, | |
2431 | req->r_session ? req->r_session->s_mds : -1); | |
2432 | mutex_unlock(&mdsc->mutex); | |
2433 | goto out; | |
2434 | } | |
2435 | ||
2436 | /* dup? */ | |
2437 | if ((req->r_got_unsafe && !head->safe) || | |
2438 | (req->r_got_safe && head->safe)) { | |
f3ae1b97 | 2439 | pr_warn("got a dup %s reply on %llu from mds%d\n", |
2f2dc053 SW |
2440 | head->safe ? "safe" : "unsafe", tid, mds); |
2441 | mutex_unlock(&mdsc->mutex); | |
2442 | goto out; | |
2443 | } | |
1550d34e | 2444 | if (req->r_got_safe) { |
f3ae1b97 | 2445 | pr_warn("got unsafe after safe on %llu from mds%d\n", |
85792d0d SW |
2446 | tid, mds); |
2447 | mutex_unlock(&mdsc->mutex); | |
2448 | goto out; | |
2449 | } | |
2f2dc053 SW |
2450 | |
2451 | result = le32_to_cpu(head->result); | |
2452 | ||
2453 | /* | |
e55b71f8 GF |
2454 | * Handle an ESTALE |
2455 | * if we're not talking to the authority, send to them | |
2456 | * if the authority has changed while we weren't looking, | |
2457 | * send to new authority | |
2458 | * Otherwise we just have to return an ESTALE | |
2f2dc053 SW |
2459 | */ |
2460 | if (result == -ESTALE) { | |
e55b71f8 | 2461 | dout("got ESTALE on request %llu", req->r_tid); |
51da8e8c | 2462 | req->r_resend_mds = -1; |
ca18bede | 2463 | if (req->r_direct_mode != USE_AUTH_MDS) { |
e55b71f8 GF |
2464 | dout("not using auth, setting for that now"); |
2465 | req->r_direct_mode = USE_AUTH_MDS; | |
2f2dc053 SW |
2466 | __do_request(mdsc, req); |
2467 | mutex_unlock(&mdsc->mutex); | |
2468 | goto out; | |
e55b71f8 | 2469 | } else { |
ca18bede YZ |
2470 | int mds = __choose_mds(mdsc, req); |
2471 | if (mds >= 0 && mds != req->r_session->s_mds) { | |
2472 | dout("but auth changed, so resending"); | |
e55b71f8 GF |
2473 | __do_request(mdsc, req); |
2474 | mutex_unlock(&mdsc->mutex); | |
2475 | goto out; | |
2476 | } | |
2f2dc053 | 2477 | } |
e55b71f8 | 2478 | dout("have to return ESTALE on request %llu", req->r_tid); |
2f2dc053 SW |
2479 | } |
2480 | ||
e55b71f8 | 2481 | |
2f2dc053 SW |
2482 | if (head->safe) { |
2483 | req->r_got_safe = true; | |
2484 | __unregister_request(mdsc, req); | |
2f2dc053 SW |
2485 | |
2486 | if (req->r_got_unsafe) { | |
2487 | /* | |
2488 | * We already handled the unsafe response, now do the | |
2489 | * cleanup. No need to examine the response; the MDS | |
2490 | * doesn't include any result info in the safe | |
2491 | * response. And even if it did, there is nothing | |
2492 | * useful we could do with a revised return value. | |
2493 | */ | |
2494 | dout("got safe reply %llu, mds%d\n", tid, mds); | |
2495 | list_del_init(&req->r_unsafe_item); | |
2496 | ||
2497 | /* last unsafe request during umount? */ | |
44ca18f2 | 2498 | if (mdsc->stopping && !__get_oldest_req(mdsc)) |
03066f23 | 2499 | complete_all(&mdsc->safe_umount_waiters); |
2f2dc053 SW |
2500 | mutex_unlock(&mdsc->mutex); |
2501 | goto out; | |
2502 | } | |
e1518c7c | 2503 | } else { |
2f2dc053 SW |
2504 | req->r_got_unsafe = true; |
2505 | list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); | |
4c06ace8 YZ |
2506 | if (req->r_unsafe_dir) { |
2507 | struct ceph_inode_info *ci = | |
2508 | ceph_inode(req->r_unsafe_dir); | |
2509 | spin_lock(&ci->i_unsafe_lock); | |
2510 | list_add_tail(&req->r_unsafe_dir_item, | |
2511 | &ci->i_unsafe_dirops); | |
2512 | spin_unlock(&ci->i_unsafe_lock); | |
2513 | } | |
2f2dc053 SW |
2514 | } |
2515 | ||
2516 | dout("handle_reply tid %lld result %d\n", tid, result); | |
2517 | rinfo = &req->r_reply_info; | |
14303d20 | 2518 | err = parse_reply_info(msg, rinfo, session->s_con.peer_features); |
2f2dc053 SW |
2519 | mutex_unlock(&mdsc->mutex); |
2520 | ||
2521 | mutex_lock(&session->s_mutex); | |
2522 | if (err < 0) { | |
25933abd | 2523 | pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); |
9ec7cab1 | 2524 | ceph_msg_dump(msg); |
2f2dc053 SW |
2525 | goto out_err; |
2526 | } | |
2527 | ||
2528 | /* snap trace */ | |
982d6011 | 2529 | realm = NULL; |
2f2dc053 SW |
2530 | if (rinfo->snapblob_len) { |
2531 | down_write(&mdsc->snap_rwsem); | |
2532 | ceph_update_snap_trace(mdsc, rinfo->snapblob, | |
982d6011 YZ |
2533 | rinfo->snapblob + rinfo->snapblob_len, |
2534 | le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, | |
2535 | &realm); | |
2f2dc053 SW |
2536 | downgrade_write(&mdsc->snap_rwsem); |
2537 | } else { | |
2538 | down_read(&mdsc->snap_rwsem); | |
2539 | } | |
2540 | ||
2541 | /* insert trace into our cache */ | |
b4556396 | 2542 | mutex_lock(&req->r_fill_mutex); |
3d14c5d2 | 2543 | err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); |
2f2dc053 | 2544 | if (err == 0) { |
6e8575fa | 2545 | if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || |
81c6aea5 | 2546 | req->r_op == CEPH_MDS_OP_LSSNAP)) |
2f2dc053 | 2547 | ceph_readdir_prepopulate(req, req->r_session); |
37151668 | 2548 | ceph_unreserve_caps(mdsc, &req->r_caps_reservation); |
2f2dc053 | 2549 | } |
b4556396 | 2550 | mutex_unlock(&req->r_fill_mutex); |
2f2dc053 SW |
2551 | |
2552 | up_read(&mdsc->snap_rwsem); | |
982d6011 YZ |
2553 | if (realm) |
2554 | ceph_put_snap_realm(mdsc, realm); | |
68cd5b4b YZ |
2555 | |
2556 | if (err == 0 && req->r_got_unsafe && req->r_target_inode) { | |
2557 | struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); | |
2558 | spin_lock(&ci->i_unsafe_lock); | |
2559 | list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); | |
2560 | spin_unlock(&ci->i_unsafe_lock); | |
2561 | } | |
2f2dc053 | 2562 | out_err: |
e1518c7c SW |
2563 | mutex_lock(&mdsc->mutex); |
2564 | if (!req->r_aborted) { | |
2565 | if (err) { | |
2566 | req->r_err = err; | |
2567 | } else { | |
5fdb1389 | 2568 | req->r_reply = ceph_msg_get(msg); |
e1518c7c SW |
2569 | req->r_got_result = true; |
2570 | } | |
2f2dc053 | 2571 | } else { |
e1518c7c | 2572 | dout("reply arrived after request %lld was aborted\n", tid); |
2f2dc053 | 2573 | } |
e1518c7c | 2574 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 2575 | |
2f2dc053 SW |
2576 | mutex_unlock(&session->s_mutex); |
2577 | ||
2578 | /* kick calling process */ | |
2579 | complete_request(mdsc, req); | |
2580 | out: | |
2581 | ceph_mdsc_put_request(req); | |
2582 | return; | |
2583 | } | |
2584 | ||
2585 | ||
2586 | ||
2587 | /* | |
2588 | * handle mds notification that our request has been forwarded. | |
2589 | */ | |
2600d2dd SW |
2590 | static void handle_forward(struct ceph_mds_client *mdsc, |
2591 | struct ceph_mds_session *session, | |
2592 | struct ceph_msg *msg) | |
2f2dc053 SW |
2593 | { |
2594 | struct ceph_mds_request *req; | |
a1ea787c | 2595 | u64 tid = le64_to_cpu(msg->hdr.tid); |
2f2dc053 SW |
2596 | u32 next_mds; |
2597 | u32 fwd_seq; | |
2f2dc053 SW |
2598 | int err = -EINVAL; |
2599 | void *p = msg->front.iov_base; | |
2600 | void *end = p + msg->front.iov_len; | |
2f2dc053 | 2601 | |
a1ea787c | 2602 | ceph_decode_need(&p, end, 2*sizeof(u32), bad); |
c89136ea SW |
2603 | next_mds = ceph_decode_32(&p); |
2604 | fwd_seq = ceph_decode_32(&p); | |
2f2dc053 SW |
2605 | |
2606 | mutex_lock(&mdsc->mutex); | |
2607 | req = __lookup_request(mdsc, tid); | |
2608 | if (!req) { | |
2a8e5e36 | 2609 | dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); |
2f2dc053 SW |
2610 | goto out; /* dup reply? */ |
2611 | } | |
2612 | ||
2a8e5e36 SW |
2613 | if (req->r_aborted) { |
2614 | dout("forward tid %llu aborted, unregistering\n", tid); | |
2615 | __unregister_request(mdsc, req); | |
2616 | } else if (fwd_seq <= req->r_num_fwd) { | |
2617 | dout("forward tid %llu to mds%d - old seq %d <= %d\n", | |
2f2dc053 SW |
2618 | tid, next_mds, req->r_num_fwd, fwd_seq); |
2619 | } else { | |
2620 | /* resend. forward race not possible; mds would drop */ | |
2a8e5e36 SW |
2621 | dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); |
2622 | BUG_ON(req->r_err); | |
2623 | BUG_ON(req->r_got_result); | |
3de22be6 | 2624 | req->r_attempts = 0; |
2f2dc053 SW |
2625 | req->r_num_fwd = fwd_seq; |
2626 | req->r_resend_mds = next_mds; | |
2627 | put_request_session(req); | |
2628 | __do_request(mdsc, req); | |
2629 | } | |
2630 | ceph_mdsc_put_request(req); | |
2631 | out: | |
2632 | mutex_unlock(&mdsc->mutex); | |
2633 | return; | |
2634 | ||
2635 | bad: | |
2636 | pr_err("mdsc_handle_forward decode error err=%d\n", err); | |
2637 | } | |
2638 | ||
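The fwd_seq comparison is what makes forward handling idempotent: notifications at or below the recorded r_num_fwd are ignored as stale duplicates. A compact sketch of that sequence check, with hypothetical names.

#include <stdio.h>

struct request { unsigned num_fwd; int resend_mds; };

static int handle_fwd(struct request *req, unsigned fwd_seq, int next_mds)
{
    if (fwd_seq <= req->num_fwd)
        return 0;               /* old news: already forwarded past this */
    req->num_fwd = fwd_seq;
    req->resend_mds = next_mds; /* resend to the suggested server */
    return 1;
}

int main(void)
{
    struct request req = { 0, -1 };
    printf("%d\n", handle_fwd(&req, 1, 2));     /* 1: acted */
    printf("%d\n", handle_fwd(&req, 1, 3));     /* 0: duplicate */
    return 0;
}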
2639 | /* | |
2640 | * handle a mds session control message | |
2641 | */ | |
2642 | static void handle_session(struct ceph_mds_session *session, | |
2643 | struct ceph_msg *msg) | |
2644 | { | |
2645 | struct ceph_mds_client *mdsc = session->s_mdsc; | |
2646 | u32 op; | |
2647 | u64 seq; | |
2600d2dd | 2648 | int mds = session->s_mds; |
2f2dc053 SW |
2649 | struct ceph_mds_session_head *h = msg->front.iov_base; |
2650 | int wake = 0; | |
2651 | ||
2f2dc053 SW |
2652 | /* decode */ |
2653 | if (msg->front.iov_len != sizeof(*h)) | |
2654 | goto bad; | |
2655 | op = le32_to_cpu(h->op); | |
2656 | seq = le64_to_cpu(h->seq); | |
2657 | ||
2658 | mutex_lock(&mdsc->mutex); | |
2600d2dd SW |
2659 | if (op == CEPH_SESSION_CLOSE) |
2660 | __unregister_session(mdsc, session); | |
2f2dc053 SW |
2661 | /* FIXME: this ttl calculation is generous */ |
2662 | session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; | |
2663 | mutex_unlock(&mdsc->mutex); | |
2664 | ||
2665 | mutex_lock(&session->s_mutex); | |
2666 | ||
2667 | dout("handle_session mds%d %s %p state %s seq %llu\n", | |
2668 | mds, ceph_session_op_name(op), session, | |
a687ecaf | 2669 | ceph_session_state_name(session->s_state), seq); |
2f2dc053 SW |
2670 | |
2671 | if (session->s_state == CEPH_MDS_SESSION_HUNG) { | |
2672 | session->s_state = CEPH_MDS_SESSION_OPEN; | |
2673 | pr_info("mds%d came back\n", session->s_mds); | |
2674 | } | |
2675 | ||
2676 | switch (op) { | |
2677 | case CEPH_SESSION_OPEN: | |
29790f26 SW |
2678 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2679 | pr_info("mds%d reconnect success\n", session->s_mds); | |
2f2dc053 SW |
2680 | session->s_state = CEPH_MDS_SESSION_OPEN; |
2681 | renewed_caps(mdsc, session, 0); | |
2682 | wake = 1; | |
2683 | if (mdsc->stopping) | |
2684 | __close_session(mdsc, session); | |
2685 | break; | |
2686 | ||
2687 | case CEPH_SESSION_RENEWCAPS: | |
2688 | if (session->s_renew_seq == seq) | |
2689 | renewed_caps(mdsc, session, 1); | |
2690 | break; | |
2691 | ||
2692 | case CEPH_SESSION_CLOSE: | |
29790f26 SW |
2693 | if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) |
2694 | pr_info("mds%d reconnect denied\n", session->s_mds); | |
1c841a96 | 2695 | cleanup_session_requests(mdsc, session); |
2f2dc053 | 2696 | remove_session_caps(session); |
656e4382 | 2697 | wake = 2; /* for good measure */ |
f3c60c59 | 2698 | wake_up_all(&mdsc->session_close_wq); |
2f2dc053 SW |
2699 | break; |
2700 | ||
2701 | case CEPH_SESSION_STALE: | |
2702 | pr_info("mds%d caps went stale, renewing\n", | |
2703 | session->s_mds); | |
d8fb02ab | 2704 | spin_lock(&session->s_gen_ttl_lock); |
2f2dc053 | 2705 | session->s_cap_gen++; |
1ce208a6 | 2706 | session->s_cap_ttl = jiffies - 1; |
d8fb02ab | 2707 | spin_unlock(&session->s_gen_ttl_lock); |
2f2dc053 SW |
2708 | send_renew_caps(mdsc, session); |
2709 | break; | |
2710 | ||
2711 | case CEPH_SESSION_RECALL_STATE: | |
2712 | trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); | |
2713 | break; | |
2714 | ||
186e4f7a YZ |
2715 | case CEPH_SESSION_FLUSHMSG: |
2716 | send_flushmsg_ack(mdsc, session, seq); | |
2717 | break; | |
2718 | ||
03f4fcb0 YZ |
2719 | case CEPH_SESSION_FORCE_RO: |
2720 | dout("force_session_readonly %p\n", session); | |
2721 | spin_lock(&session->s_cap_lock); | |
2722 | session->s_readonly = true; | |
2723 | spin_unlock(&session->s_cap_lock); | |
2724 | wake_up_session_caps(session, 0); | |
2725 | break; | |
2726 | ||
2f2dc053 SW |
2727 | default: |
2728 | pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); | |
2729 | WARN_ON(1); | |
2730 | } | |
2731 | ||
2732 | mutex_unlock(&session->s_mutex); | |
2733 | if (wake) { | |
2734 | mutex_lock(&mdsc->mutex); | |
2735 | __wake_requests(mdsc, &session->s_waiting); | |
656e4382 YZ |
2736 | if (wake == 2) |
2737 | kick_requests(mdsc, mds); | |
2f2dc053 SW |
2738 | mutex_unlock(&mdsc->mutex); |
2739 | } | |
2740 | return; | |
2741 | ||
2742 | bad: | |
2743 | pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, | |
2744 | (int)msg->front.iov_len); | |
9ec7cab1 | 2745 | ceph_msg_dump(msg); |
2f2dc053 SW |
2746 | return; |
2747 | } | |
2748 | ||
2749 | ||
2750 | /* | |
2751 | * called under session->mutex. | |
2752 | */ | |
2753 | static void replay_unsafe_requests(struct ceph_mds_client *mdsc, | |
2754 | struct ceph_mds_session *session) | |
2755 | { | |
2756 | struct ceph_mds_request *req, *nreq; | |
3de22be6 | 2757 | struct rb_node *p; |
2f2dc053 SW |
2758 | int err; |
2759 | ||
2760 | dout("replay_unsafe_requests mds%d\n", session->s_mds); | |
2761 | ||
2762 | mutex_lock(&mdsc->mutex); | |
2763 | list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { | |
6e6f0923 | 2764 | err = __prepare_send_request(mdsc, req, session->s_mds, true); |
2f2dc053 SW |
2765 | if (!err) { |
2766 | ceph_msg_get(req->r_request); | |
2767 | ceph_con_send(&session->s_con, req->r_request); | |
2768 | } | |
2769 | } | |
3de22be6 YZ |
2770 | |
2771 | /* | |
2772 | * also re-send old requests when the MDS enters the reconnect stage, so
2773 | * that the MDS can process completed requests in its clientreplay stage. | |
2774 | */ | |
2775 | p = rb_first(&mdsc->request_tree); | |
2776 | while (p) { | |
2777 | req = rb_entry(p, struct ceph_mds_request, r_node); | |
2778 | p = rb_next(p); | |
2779 | if (req->r_got_unsafe) | |
2780 | continue; | |
2781 | if (req->r_attempts == 0) | |
2782 | continue; /* only old requests */ | |
2783 | if (req->r_session && | |
2784 | req->r_session->s_mds == session->s_mds) { | |
6e6f0923 YZ |
2785 | err = __prepare_send_request(mdsc, req, |
2786 | session->s_mds, true); | |
3de22be6 YZ |
2787 | if (!err) { |
2788 | ceph_msg_get(req->r_request); | |
2789 | ceph_con_send(&session->s_con, req->r_request); | |
2790 | } | |
2791 | } | |
2792 | } | |
2f2dc053 SW |
2793 | mutex_unlock(&mdsc->mutex); |
2794 | } | |
2795 | ||
2796 | /* | |
2797 | * Encode information about a cap for a reconnect with the MDS. | |
2798 | */ | |
2f2dc053 SW |
2799 | static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, |
2800 | void *arg) | |
2801 | { | |
20cb34ae SW |
2802 | union { |
2803 | struct ceph_mds_cap_reconnect v2; | |
2804 | struct ceph_mds_cap_reconnect_v1 v1; | |
2805 | } rec; | |
2806 | size_t reclen; | |
2f2dc053 | 2807 | struct ceph_inode_info *ci; |
20cb34ae SW |
2808 | struct ceph_reconnect_state *recon_state = arg; |
2809 | struct ceph_pagelist *pagelist = recon_state->pagelist; | |
2f2dc053 SW |
2810 | char *path; |
2811 | int pathlen, err; | |
2812 | u64 pathbase; | |
2813 | struct dentry *dentry; | |
2814 | ||
2815 | ci = cap->ci; | |
2816 | ||
2817 | dout(" adding %p ino %llx.%llx cap %p %lld %s\n", | |
2818 | inode, ceph_vinop(inode), cap, cap->cap_id, | |
2819 | ceph_cap_string(cap->issued)); | |
93cea5be SW |
2820 | err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); |
2821 | if (err) | |
2822 | return err; | |
2f2dc053 SW |
2823 | |
2824 | dentry = d_find_alias(inode); | |
2825 | if (dentry) { | |
2826 | path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); | |
2827 | if (IS_ERR(path)) { | |
2828 | err = PTR_ERR(path); | |
e072f8aa | 2829 | goto out_dput; |
2f2dc053 SW |
2830 | } |
2831 | } else { | |
2832 | path = NULL; | |
2833 | pathlen = 0; | |
2834 | } | |
93cea5be SW |
2835 | err = ceph_pagelist_encode_string(pagelist, path, pathlen); |
2836 | if (err) | |
e072f8aa | 2837 | goto out_free; |
2f2dc053 | 2838 | |
be655596 | 2839 | spin_lock(&ci->i_ceph_lock); |
2f2dc053 SW |
2840 | cap->seq = 0; /* reset cap seq */ |
2841 | cap->issue_seq = 0; /* and issue_seq */ | |
667ca05c | 2842 | cap->mseq = 0; /* and migrate_seq */ |
99a9c273 | 2843 | cap->cap_gen = cap->session->s_cap_gen; |
20cb34ae SW |
2844 | |
2845 | if (recon_state->flock) { | |
2846 | rec.v2.cap_id = cpu_to_le64(cap->cap_id); | |
2847 | rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2848 | rec.v2.issued = cpu_to_le32(cap->issued); | |
2849 | rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2850 | rec.v2.pathbase = cpu_to_le64(pathbase); | |
2851 | rec.v2.flock_len = 0; | |
2852 | reclen = sizeof(rec.v2); | |
2853 | } else { | |
2854 | rec.v1.cap_id = cpu_to_le64(cap->cap_id); | |
2855 | rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); | |
2856 | rec.v1.issued = cpu_to_le32(cap->issued); | |
2857 | rec.v1.size = cpu_to_le64(inode->i_size); | |
2858 | ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); | |
2859 | ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); | |
2860 | rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); | |
2861 | rec.v1.pathbase = cpu_to_le64(pathbase); | |
2862 | reclen = sizeof(rec.v1); | |
2863 | } | |
be655596 | 2864 | spin_unlock(&ci->i_ceph_lock); |
2f2dc053 | 2865 | |
40819f6f GF |
2866 | if (recon_state->flock) { |
2867 | int num_fcntl_locks, num_flock_locks; | |
39be95e9 JS |
2868 | struct ceph_filelock *flocks; |
2869 | ||
2870 | encode_again: | |
39be95e9 | 2871 | ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); |
39be95e9 JS |
2872 | flocks = kmalloc((num_fcntl_locks+num_flock_locks) * |
2873 | sizeof(struct ceph_filelock), GFP_NOFS); | |
2874 | if (!flocks) { | |
2875 | err = -ENOMEM; | |
2876 | goto out_free; | |
2877 | } | |
39be95e9 JS |
2878 | err = ceph_encode_locks_to_buffer(inode, flocks, |
2879 | num_fcntl_locks, | |
2880 | num_flock_locks); | |
39be95e9 JS |
2881 | if (err) { |
2882 | kfree(flocks); | |
2883 | if (err == -ENOSPC) | |
2884 | goto encode_again; | |
2885 | goto out_free; | |
2886 | } | |
2887 | /* | |
2888 | * number of encoded locks is stable, so copy to pagelist | |
2889 | */ | |
2890 | rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) + | |
2891 | (num_fcntl_locks+num_flock_locks) * | |
2892 | sizeof(struct ceph_filelock)); | |
2893 | err = ceph_pagelist_append(pagelist, &rec, reclen); | |
2894 | if (!err) | |
2895 | err = ceph_locks_to_pagelist(flocks, pagelist, | |
2896 | num_fcntl_locks, | |
2897 | num_flock_locks); | |
2898 | kfree(flocks); | |
3612abbd SW |
2899 | } else { |
2900 | err = ceph_pagelist_append(pagelist, &rec, reclen); | |
40819f6f | 2901 | } |
44c99757 YZ |
2902 | |
2903 | recon_state->nr_caps++; | |
e072f8aa | 2904 | out_free: |
2f2dc053 | 2905 | kfree(path); |
e072f8aa | 2906 | out_dput: |
2f2dc053 | 2907 | dput(dentry); |
93cea5be | 2908 | return err; |
2f2dc053 SW |
2909 | } |
2910 | ||
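The encode_again label above implements a count-allocate-encode-retry loop: if locks were added between counting and encoding, the encoder reports -ENOSPC and the whole pass restarts with a fresh count. A userspace sketch of the same loop; count_locks() and fill_locks() are invented stubs standing in for the lock-encoding helpers.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct lockrec { long start, len; };

static int population = 3;          /* pretend current lock count */

static int count_locks(void) { return population; }

/* pretend encoder: fails with -ENOSPC if the count grew meanwhile */
static int fill_locks(struct lockrec *buf, int capacity)
{
    if (population > capacity)
        return -ENOSPC;
    for (int i = 0; i < population; i++)
        buf[i] = (struct lockrec){ i, 1 };
    return 0;
}

static struct lockrec *snapshot_locks(int *nr)
{
    struct lockrec *buf;
    int n, err;

    for (;;) {                      /* the encode_again loop */
        n = count_locks();
        buf = malloc(sizeof(*buf) * (size_t)n);
        if (!buf)
            return NULL;
        err = fill_locks(buf, n);
        if (err != -ENOSPC)
            break;
        free(buf);                  /* raced: count changed, retry */
    }
    if (err < 0) {
        free(buf);
        return NULL;
    }
    *nr = n;
    return buf;
}

int main(void)
{
    int nr;
    struct lockrec *locks = snapshot_locks(&nr);
    if (locks) {
        printf("snapshotted %d locks\n", nr);
        free(locks);
    }
    return 0;
}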
2911 | ||
2912 | /* | |
2913 | * If an MDS fails and recovers, clients need to reconnect in order to | |
2914 | * reestablish shared state. This includes all caps issued through | |
2915 | * this session _and_ the snap_realm hierarchy. Because it's not | |
2916 | * clear which snap realms the mds cares about, we send everything we | |
2917 | * know about; that ensures we'll then get any new info the |
2918 | * recovering MDS might have. | |
2919 | * | |
2920 | * This is a relatively heavyweight operation, but it's rare. | |
2921 | * | |
2922 | * called with mdsc->mutex held. | |
2923 | */ | |
34b6c855 SW |
2924 | static void send_mds_reconnect(struct ceph_mds_client *mdsc, |
2925 | struct ceph_mds_session *session) | |
2f2dc053 | 2926 | { |
2f2dc053 | 2927 | struct ceph_msg *reply; |
a105f00c | 2928 | struct rb_node *p; |
34b6c855 | 2929 | int mds = session->s_mds; |
9abf82b8 | 2930 | int err = -ENOMEM; |
44c99757 | 2931 | int s_nr_caps; |
93cea5be | 2932 | struct ceph_pagelist *pagelist; |
20cb34ae | 2933 | struct ceph_reconnect_state recon_state; |
2f2dc053 | 2934 | |
34b6c855 | 2935 | pr_info("mds%d reconnect start\n", mds); |
2f2dc053 | 2936 | |
93cea5be SW |
2937 | pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); |
2938 | if (!pagelist) | |
2939 | goto fail_nopagelist; | |
2940 | ceph_pagelist_init(pagelist); | |
2941 | ||
b61c2763 | 2942 | reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); |
a79832f2 | 2943 | if (!reply) |
93cea5be | 2944 | goto fail_nomsg; |
93cea5be | 2945 | |
34b6c855 SW |
2946 | mutex_lock(&session->s_mutex); |
2947 | session->s_state = CEPH_MDS_SESSION_RECONNECTING; | |
2948 | session->s_seq = 0; | |
2f2dc053 | 2949 | |
2f2dc053 | 2950 | dout("session %p state %s\n", session, |
a687ecaf | 2951 | ceph_session_state_name(session->s_state)); |
2f2dc053 | 2952 | |
99a9c273 YZ |
2953 | spin_lock(&session->s_gen_ttl_lock); |
2954 | session->s_cap_gen++; | |
2955 | spin_unlock(&session->s_gen_ttl_lock); | |
2956 | ||
2957 | spin_lock(&session->s_cap_lock); | |
03f4fcb0 YZ |
2958 | /* don't know if session is readonly */ |
2959 | session->s_readonly = 0; | |
99a9c273 YZ |
2960 | /* |
2961 | * notify __ceph_remove_cap() that we are composing cap reconnect. | |
2962 | * If a cap get released before being added to the cap reconnect, | |
2963 | * __ceph_remove_cap() should skip queuing cap release. | |
2964 | */ | |
2965 | session->s_cap_reconnect = 1; | |
e01a5946 | 2966 | /* drop old cap expires; we're about to reestablish that state */ |
745a8e3b | 2967 | cleanup_cap_releases(mdsc, session); |
e01a5946 | 2968 | |
5d23371f | 2969 | /* trim unused caps to reduce MDS's cache rejoin time */ |
c0bd50e2 YZ |
2970 | if (mdsc->fsc->sb->s_root) |
2971 | shrink_dcache_parent(mdsc->fsc->sb->s_root); | |
5d23371f YZ |
2972 | |
2973 | ceph_con_close(&session->s_con); | |
2974 | ceph_con_open(&session->s_con, | |
2975 | CEPH_ENTITY_TYPE_MDS, mds, | |
2976 | ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); | |
2977 | ||
2978 | /* replay unsafe requests */ | |
2979 | replay_unsafe_requests(mdsc, session); | |
2980 | ||
2981 | down_read(&mdsc->snap_rwsem); | |
2982 | ||
2f2dc053 | 2983 | /* traverse this session's caps */ |
44c99757 YZ |
2984 | s_nr_caps = session->s_nr_caps; |
2985 | err = ceph_pagelist_encode_32(pagelist, s_nr_caps); | |
93cea5be SW |
2986 | if (err) |
2987 | goto fail; | |
20cb34ae | 2988 | |
44c99757 | 2989 | recon_state.nr_caps = 0; |
20cb34ae SW |
2990 | recon_state.pagelist = pagelist; |
2991 | recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; | |
2992 | err = iterate_session_caps(session, encode_caps_cb, &recon_state); | |
2f2dc053 | 2993 | if (err < 0) |
9abf82b8 | 2994 | goto fail; |
2f2dc053 | 2995 | |
99a9c273 YZ |
2996 | spin_lock(&session->s_cap_lock); |
2997 | session->s_cap_reconnect = 0; | |
2998 | spin_unlock(&session->s_cap_lock); | |
2999 | ||
2f2dc053 SW |
3000 | /* |
3001 | * snaprealms. we provide mds with the ino, seq (version), and | |
3002 | * parent for all of our realms. If the mds has any newer info, | |
3003 | * it will tell us. | |
3004 | */ | |
a105f00c SW |
3005 | for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { |
3006 | struct ceph_snap_realm *realm = | |
3007 | rb_entry(p, struct ceph_snap_realm, node); | |
93cea5be | 3008 | struct ceph_mds_snaprealm_reconnect sr_rec; |
2f2dc053 SW |
3009 | |
3010 | dout(" adding snap realm %llx seq %lld parent %llx\n", | |
3011 | realm->ino, realm->seq, realm->parent_ino); | |
93cea5be SW |
3012 | sr_rec.ino = cpu_to_le64(realm->ino); |
3013 | sr_rec.seq = cpu_to_le64(realm->seq); | |
3014 | sr_rec.parent = cpu_to_le64(realm->parent_ino); | |
3015 | err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); | |
3016 | if (err) | |
3017 | goto fail; | |
2f2dc053 | 3018 | } |
2f2dc053 | 3019 | |
20cb34ae SW |
3020 | if (recon_state.flock) |
3021 | reply->hdr.version = cpu_to_le16(2); | |
44c99757 YZ |
3022 | |
3023 | /* raced with a cap release? then the cap count we already wrote at the head of the pagelist is stale; patch it in place */ |
3024 | if (s_nr_caps != recon_state.nr_caps) { | |
3025 | struct page *page = list_first_entry(&pagelist->head, | |
3026 | struct page, lru); | |
3027 | __le32 *addr = kmap_atomic(page); | |
3028 | *addr = cpu_to_le32(recon_state.nr_caps); | |
3029 | kunmap_atomic(addr); | |
ebf18f47 | 3030 | } |
44c99757 YZ |
3031 | |
3032 | reply->hdr.data_len = cpu_to_le32(pagelist->length); | |
3033 | ceph_msg_data_add_pagelist(reply, pagelist); | |
e548e9b9 YZ |
3034 | |
3035 | ceph_early_kick_flushing_caps(mdsc, session); | |
3036 | ||
2f2dc053 SW |
3037 | ceph_con_send(&session->s_con, reply); |
3038 | ||
9abf82b8 SW |
3039 | mutex_unlock(&session->s_mutex); |
3040 | ||
3041 | mutex_lock(&mdsc->mutex); | |
3042 | __wake_requests(mdsc, &session->s_waiting); | |
3043 | mutex_unlock(&mdsc->mutex); | |
3044 | ||
2f2dc053 | 3045 | up_read(&mdsc->snap_rwsem); |
2f2dc053 SW |
3046 | return; |
3047 | ||
93cea5be | 3048 | fail: |
2f2dc053 | 3049 | ceph_msg_put(reply); |
9abf82b8 SW |
3050 | up_read(&mdsc->snap_rwsem); |
3051 | mutex_unlock(&session->s_mutex); | |
93cea5be SW |
3052 | fail_nomsg: |
3053 | ceph_pagelist_release(pagelist); | |
93cea5be | 3054 | fail_nopagelist: |
9abf82b8 | 3055 | pr_err("error %d preparing reconnect for mds%d\n", err, mds); |
9abf82b8 | 3056 | return; |
2f2dc053 SW |
3057 | } |
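/*
 * [Added editorial sketch, not part of the original source] As
 * assembled above, the reconnect pagelist is laid out roughly as:
 *
 *	__le32 nr_caps;			patched in place afterwards if a
 *					cap was released mid-encode
 *	...				nr_caps records written by
 *					encode_caps_cb(), one per cap
 *	struct ceph_mds_snaprealm_reconnect {
 *		__le64 ino, seq, parent;
 *	};				one per known snap realm
 *
 * hdr.version is bumped to 2 when the peer advertises
 * CEPH_FEATURE_FLOCK, telling the MDS that file locking state is
 * included in the per-cap records.
 */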
3058 | ||
3059 | ||
3060 | /* | |
3061 | * compare old and new mdsmaps, kicking requests | |
3062 | * and closing out old connections as necessary | |
3063 | * | |
3064 | * called under mdsc->mutex. | |
3065 | */ | |
3066 | static void check_new_map(struct ceph_mds_client *mdsc, | |
3067 | struct ceph_mdsmap *newmap, | |
3068 | struct ceph_mdsmap *oldmap) | |
3069 | { | |
3070 | int i; | |
3071 | int oldstate, newstate; | |
3072 | struct ceph_mds_session *s; | |
3073 | ||
3074 | dout("check_new_map new %u old %u\n", | |
3075 | newmap->m_epoch, oldmap->m_epoch); | |
3076 | ||
3077 | for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { | |
3078 | if (mdsc->sessions[i] == NULL) | |
3079 | continue; | |
3080 | s = mdsc->sessions[i]; | |
3081 | oldstate = ceph_mdsmap_get_state(oldmap, i); | |
3082 | newstate = ceph_mdsmap_get_state(newmap, i); | |
3083 | ||
0deb01c9 | 3084 | dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", |
2f2dc053 | 3085 | i, ceph_mds_state_name(oldstate), |
0deb01c9 | 3086 | ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", |
2f2dc053 | 3087 | ceph_mds_state_name(newstate), |
0deb01c9 | 3088 | ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", |
a687ecaf | 3089 | ceph_session_state_name(s->s_state)); |
2f2dc053 | 3090 | |
3e8f43a0 YZ |
3091 | if (i >= newmap->m_max_mds || |
3092 | memcmp(ceph_mdsmap_get_addr(oldmap, i), | |
2f2dc053 SW |
3093 | ceph_mdsmap_get_addr(newmap, i), |
3094 | sizeof(struct ceph_entity_addr))) { | |
3095 | if (s->s_state == CEPH_MDS_SESSION_OPENING) { | |
3096 | /* the session never opened, just close it | |
3097 | * out now */ | |
3098 | __wake_requests(mdsc, &s->s_waiting); | |
2600d2dd | 3099 | __unregister_session(mdsc, s); |
2f2dc053 SW |
3100 | } else { |
3101 | /* just close it */ | |
3102 | mutex_unlock(&mdsc->mutex); | |
3103 | mutex_lock(&s->s_mutex); | |
3104 | mutex_lock(&mdsc->mutex); | |
3105 | ceph_con_close(&s->s_con); | |
3106 | mutex_unlock(&s->s_mutex); | |
3107 | s->s_state = CEPH_MDS_SESSION_RESTARTING; | |
3108 | } | |
2f2dc053 SW |
3109 | } else if (oldstate == newstate) { |
3110 | continue; /* nothing new with this mds */ | |
3111 | } | |
3112 | ||
3113 | /* | |
3114 | * send reconnect? | |
3115 | */ | |
3116 | if (s->s_state == CEPH_MDS_SESSION_RESTARTING && | |
34b6c855 SW |
3117 | newstate >= CEPH_MDS_STATE_RECONNECT) { |
3118 | mutex_unlock(&mdsc->mutex); | |
3119 | send_mds_reconnect(mdsc, s); | |
3120 | mutex_lock(&mdsc->mutex); | |
3121 | } | |
2f2dc053 SW |
3122 | |
3123 | /* | |
29790f26 | 3124 | * kick requests on any mds that has gone active. | |
2f2dc053 SW |
3125 | */ |
3126 | if (oldstate < CEPH_MDS_STATE_ACTIVE && | |
3127 | newstate >= CEPH_MDS_STATE_ACTIVE) { | |
29790f26 SW |
3128 | if (oldstate != CEPH_MDS_STATE_CREATING && |
3129 | oldstate != CEPH_MDS_STATE_STARTING) | |
3130 | pr_info("mds%d recovery completed\n", s->s_mds); | |
3131 | kick_requests(mdsc, i); | |
2f2dc053 | 3132 | ceph_kick_flushing_caps(mdsc, s); |
0dc2570f | 3133 | wake_up_session_caps(s, 1); |
2f2dc053 SW |
3134 | } |
3135 | } | |
cb170a22 SW |
3136 | |
3137 | for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { | |
3138 | s = mdsc->sessions[i]; | |
3139 | if (!s) | |
3140 | continue; | |
3141 | if (!ceph_mdsmap_is_laggy(newmap, i)) | |
3142 | continue; | |
3143 | if (s->s_state == CEPH_MDS_SESSION_OPEN || | |
3144 | s->s_state == CEPH_MDS_SESSION_HUNG || | |
3145 | s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3146 | dout(" connecting to export targets of laggy mds%d\n", | |
3147 | i); | |
3148 | __open_export_target_sessions(mdsc, s); | |
3149 | } | |
3150 | } | |
2f2dc053 SW |
3151 | } |
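/*
 * [Added editorial note] For a typical MDS restart, check_new_map()
 * sees something like the following across successive map epochs:
 *
 *	epoch N:   mds0 addr changes	   -> close con, mark session
 *					      RESTARTING
 *	epoch N+1: mds0 state >= RECONNECT -> send_mds_reconnect()
 *	epoch N+2: mds0 state >= ACTIVE	   -> kick_requests() and
 *					      ceph_kick_flushing_caps()
 *
 * Each step is driven by a new mdsmap arriving via
 * ceph_mdsc_handle_map() below.
 */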
3152 | ||
3153 | ||
3154 | ||
3155 | /* | |
3156 | * leases | |
3157 | */ | |
3158 | ||
3159 | /* | |
3160 | * caller must hold session s_mutex, dentry->d_lock | |
3161 | */ | |
3162 | void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) | |
3163 | { | |
3164 | struct ceph_dentry_info *di = ceph_dentry(dentry); | |
3165 | ||
3166 | ceph_put_mds_session(di->lease_session); | |
3167 | di->lease_session = NULL; | |
3168 | } | |
3169 | ||
2600d2dd SW |
3170 | static void handle_lease(struct ceph_mds_client *mdsc, |
3171 | struct ceph_mds_session *session, | |
3172 | struct ceph_msg *msg) | |
2f2dc053 | 3173 | { |
3d14c5d2 | 3174 | struct super_block *sb = mdsc->fsc->sb; |
2f2dc053 | 3175 | struct inode *inode; |
2f2dc053 SW |
3176 | struct dentry *parent, *dentry; |
3177 | struct ceph_dentry_info *di; | |
2600d2dd | 3178 | int mds = session->s_mds; |
2f2dc053 | 3179 | struct ceph_mds_lease *h = msg->front.iov_base; |
1e5ea23d | 3180 | u32 seq; |
2f2dc053 | 3181 | struct ceph_vino vino; |
2f2dc053 SW |
3182 | struct qstr dname; |
3183 | int release = 0; | |
3184 | ||
2f2dc053 SW |
3185 | dout("handle_lease from mds%d\n", mds); |
3186 | ||
3187 | /* decode */ | |
3188 | if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) | |
3189 | goto bad; | |
3190 | vino.ino = le64_to_cpu(h->ino); | |
3191 | vino.snap = CEPH_NOSNAP; | |
1e5ea23d | 3192 | seq = le32_to_cpu(h->seq); |
2f2dc053 SW |
3193 | dname.name = (void *)h + sizeof(*h) + sizeof(u32); |
3194 | dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); | |
3195 | if (dname.len != get_unaligned_le32(h+1)) | |
3196 | goto bad; | |
3197 | ||
2f2dc053 SW |
3198 | /* lookup inode */ |
3199 | inode = ceph_find_inode(sb, vino); | |
2f90b852 SW |
3200 | dout("handle_lease %s, ino %llx %p %.*s\n", |
3201 | ceph_lease_op_name(h->action), vino.ino, inode, | |
1e5ea23d | 3202 | dname.len, dname.name); |
6cd3bcad YZ |
3203 | |
3204 | mutex_lock(&session->s_mutex); | |
3205 | session->s_seq++; | |
3206 | ||
2f2dc053 SW |
3207 | if (inode == NULL) { |
3208 | dout("handle_lease no inode %llx\n", vino.ino); | |
3209 | goto release; | |
3210 | } | |
2f2dc053 SW |
3211 | |
3212 | /* dentry */ | |
3213 | parent = d_find_alias(inode); | |
3214 | if (!parent) { | |
3215 | dout("no parent dentry on inode %p\n", inode); | |
3216 | WARN_ON(1); | |
3217 | goto release; /* hrm... */ | |
3218 | } | |
3219 | dname.hash = full_name_hash(dname.name, dname.len); | |
3220 | dentry = d_lookup(parent, &dname); | |
3221 | dput(parent); | |
3222 | if (!dentry) | |
3223 | goto release; | |
3224 | ||
3225 | spin_lock(&dentry->d_lock); | |
3226 | di = ceph_dentry(dentry); | |
3227 | switch (h->action) { | |
3228 | case CEPH_MDS_LEASE_REVOKE: | |
3d8eb7a9 | 3229 | if (di->lease_session == session) { |
1e5ea23d SW |
3230 | if (ceph_seq_cmp(di->lease_seq, seq) > 0) |
3231 | h->seq = cpu_to_le32(di->lease_seq); | |
2f2dc053 SW |
3232 | __ceph_mdsc_drop_dentry_lease(dentry); |
3233 | } | |
3234 | release = 1; | |
3235 | break; | |
3236 | ||
3237 | case CEPH_MDS_LEASE_RENEW: | |
3d8eb7a9 | 3238 | if (di->lease_session == session && |
2f2dc053 SW |
3239 | di->lease_gen == session->s_cap_gen && |
3240 | di->lease_renew_from && | |
3241 | di->lease_renew_after == 0) { | |
3242 | unsigned long duration = | |
3563dbdd | 3243 | msecs_to_jiffies(le32_to_cpu(h->duration_ms)); |
2f2dc053 | 3244 | |
1e5ea23d | 3245 | di->lease_seq = seq; |
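			/*
			 * [Added note] The lease is valid for duration_ms
			 * past lease_renew_from; e.g. with duration_ms ==
			 * 30000 the dentry lease expires 30s after
			 * renew_from, and renewal is attempted from the
			 * halfway point (duration >> 1 == 15s).
			 */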
2f2dc053 SW |
3246 | dentry->d_time = di->lease_renew_from + duration; |
3247 | di->lease_renew_after = di->lease_renew_from + | |
3248 | (duration >> 1); | |
3249 | di->lease_renew_from = 0; | |
3250 | } | |
3251 | break; | |
3252 | } | |
3253 | spin_unlock(&dentry->d_lock); | |
3254 | dput(dentry); | |
3255 | ||
3256 | if (!release) | |
3257 | goto out; | |
3258 | ||
3259 | release: | |
3260 | /* let's just reuse the same message */ | |
3261 | h->action = CEPH_MDS_LEASE_REVOKE_ACK; | |
3262 | ceph_msg_get(msg); | |
3263 | ceph_con_send(&session->s_con, msg); | |
3264 | ||
3265 | out: | |
3266 | iput(inode); | |
3267 | mutex_unlock(&session->s_mutex); | |
2f2dc053 SW |
3268 | return; |
3269 | ||
3270 | bad: | |
3271 | pr_err("corrupt lease message\n"); | |
9ec7cab1 | 3272 | ceph_msg_dump(msg); |
2f2dc053 SW |
3273 | } |
3274 | ||
3275 | void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, | |
3276 | struct inode *inode, | |
3277 | struct dentry *dentry, char action, | |
3278 | u32 seq) | |
3279 | { | |
3280 | struct ceph_msg *msg; | |
3281 | struct ceph_mds_lease *lease; | |
3282 | int len = sizeof(*lease) + sizeof(u32); | |
3283 | int dnamelen = 0; | |
3284 | ||
3285 | dout("lease_send_msg inode %p dentry %p %s to mds%d\n", | |
3286 | inode, dentry, ceph_lease_op_name(action), session->s_mds); | |
3287 | dnamelen = dentry->d_name.len; | |
3288 | len += dnamelen; | |
3289 | ||
b61c2763 | 3290 | msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); |
a79832f2 | 3291 | if (!msg) |
2f2dc053 SW |
3292 | return; |
3293 | lease = msg->front.iov_base; | |
3294 | lease->action = action; | |
2f2dc053 SW |
3295 | lease->ino = cpu_to_le64(ceph_vino(inode).ino); |
3296 | lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); | |
3297 | lease->seq = cpu_to_le32(seq); | |
3298 | put_unaligned_le32(dnamelen, lease + 1); | |
3299 | memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); | |
3300 | ||
3301 | /* | |
3302 | * if this is a preemptive lease RELEASE, no need to | |
3303 | * flush request stream, since the actual request will | |
3304 | * soon follow. | |
3305 | */ | |
3306 | msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); | |
3307 | ||
3308 | ceph_con_send(&session->s_con, msg); | |
3309 | } | |
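/*
 * [Added editorial sketch] The CLIENT_LEASE message built above is,
 * roughly:
 *
 *	struct ceph_mds_lease lease;	action, ino, snap range, seq, ...
 *	__le32 dname_len;
 *	char dname[dname_len];		dentry name, not NUL-terminated
 *
 * which mirrors the decode at the top of handle_lease().
 */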
3310 | ||
3311 | /* | |
3312 | * Preemptively release a lease we expect to invalidate anyway. | |
3313 | * Pass both @inode and @dentry; neither may be NULL (see the BUG_ONs below). | |
3314 | */ | |
3315 | void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, | |
2f90b852 | 3316 | struct dentry *dentry) |
2f2dc053 SW |
3317 | { |
3318 | struct ceph_dentry_info *di; | |
3319 | struct ceph_mds_session *session; | |
3320 | u32 seq; | |
3321 | ||
3322 | BUG_ON(inode == NULL); | |
3323 | BUG_ON(dentry == NULL); | |
2f2dc053 SW |
3324 | |
3325 | /* is dentry lease valid? */ | |
3326 | spin_lock(&dentry->d_lock); | |
3327 | di = ceph_dentry(dentry); | |
3328 | if (!di || !di->lease_session || | |
3329 | di->lease_session->s_mds < 0 || | |
3330 | di->lease_gen != di->lease_session->s_cap_gen || | |
3331 | !time_before(jiffies, dentry->d_time)) { | |
3332 | dout("lease_release inode %p dentry %p -- " | |
2f90b852 SW |
3333 | "no lease\n", |
3334 | inode, dentry); | |
2f2dc053 SW |
3335 | spin_unlock(&dentry->d_lock); |
3336 | return; | |
3337 | } | |
3338 | ||
3339 | /* we do have a lease on this dentry; note mds and seq */ | |
3340 | session = ceph_get_mds_session(di->lease_session); | |
3341 | seq = di->lease_seq; | |
3342 | __ceph_mdsc_drop_dentry_lease(dentry); | |
3343 | spin_unlock(&dentry->d_lock); | |
3344 | ||
2f90b852 SW |
3345 | dout("lease_release inode %p dentry %p to mds%d\n", |
3346 | inode, dentry, session->s_mds); | |
2f2dc053 SW |
3347 | ceph_mdsc_lease_send_msg(session, inode, dentry, |
3348 | CEPH_MDS_LEASE_RELEASE, seq); | |
3349 | ceph_put_mds_session(session); | |
3350 | } | |
3351 | ||
3352 | /* | |
3353 | * drop all leases (and dentry refs) in preparation for umount | |
3354 | */ | |
3355 | static void drop_leases(struct ceph_mds_client *mdsc) | |
3356 | { | |
3357 | int i; | |
3358 | ||
3359 | dout("drop_leases\n"); | |
3360 | mutex_lock(&mdsc->mutex); | |
3361 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3362 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
3363 | if (!s) | |
3364 | continue; | |
3365 | mutex_unlock(&mdsc->mutex); | |
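		/*
		 * [Added note] Taking and immediately dropping s_mutex
		 * does no work itself; it presumably acts as a barrier,
		 * letting any handler that currently holds the session
		 * mutex finish before we drop our session ref.
		 */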
3366 | mutex_lock(&s->s_mutex); | |
3367 | mutex_unlock(&s->s_mutex); | |
3368 | ceph_put_mds_session(s); | |
3369 | mutex_lock(&mdsc->mutex); | |
3370 | } | |
3371 | mutex_unlock(&mdsc->mutex); | |
3372 | } | |
3373 | ||
3374 | ||
3375 | ||
3376 | /* | |
3377 | * delayed work -- periodically trim expired leases, renew caps with mds | |
3378 | */ | |
3379 | static void schedule_delayed(struct ceph_mds_client *mdsc) | |
3380 | { | |
3381 | int delay = 5; | |
3382 | unsigned hz = round_jiffies_relative(HZ * delay); | |
3383 | schedule_delayed_work(&mdsc->delayed_work, hz); | |
3384 | } | |
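/*
 * [Added note] delayed_work() below re-arms itself by calling
 * schedule_delayed() on its way out, so this housekeeping runs roughly
 * every 5 seconds (round_jiffies_relative() batches the wakeup onto a
 * jiffy boundary) until the work is cancelled during unmount.
 */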
3385 | ||
3386 | static void delayed_work(struct work_struct *work) | |
3387 | { | |
3388 | int i; | |
3389 | struct ceph_mds_client *mdsc = | |
3390 | container_of(work, struct ceph_mds_client, delayed_work.work); | |
3391 | int renew_interval; | |
3392 | int renew_caps; | |
3393 | ||
3394 | dout("mdsc delayed_work\n"); | |
afcdaea3 | 3395 | ceph_check_delayed_caps(mdsc); |
2f2dc053 SW |
3396 | |
3397 | mutex_lock(&mdsc->mutex); | |
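	/*
	 * [Added note] Renew at a quarter of the MDS session timeout so
	 * that several renew attempts can be lost before the session
	 * would actually go stale.
	 */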
3398 | renew_interval = mdsc->mdsmap->m_session_timeout >> 2; | |
3399 | renew_caps = time_after_eq(jiffies, HZ*renew_interval + | |
3400 | mdsc->last_renew_caps); | |
3401 | if (renew_caps) | |
3402 | mdsc->last_renew_caps = jiffies; | |
3403 | ||
3404 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3405 | struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); | |
3406 | if (s == NULL) | |
3407 | continue; | |
3408 | if (s->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3409 | dout("resending session close request for mds%d\n", | |
3410 | s->s_mds); | |
3411 | request_close_session(mdsc, s); | |
3412 | ceph_put_mds_session(s); | |
3413 | continue; | |
3414 | } | |
3415 | if (s->s_ttl && time_after(jiffies, s->s_ttl)) { | |
3416 | if (s->s_state == CEPH_MDS_SESSION_OPEN) { | |
3417 | s->s_state = CEPH_MDS_SESSION_HUNG; | |
3418 | pr_info("mds%d hung\n", s->s_mds); | |
3419 | } | |
3420 | } | |
3421 | if (s->s_state < CEPH_MDS_SESSION_OPEN) { | |
3422 | /* this mds is failed or recovering, just wait */ | |
3423 | ceph_put_mds_session(s); | |
3424 | continue; | |
3425 | } | |
3426 | mutex_unlock(&mdsc->mutex); | |
3427 | ||
3428 | mutex_lock(&s->s_mutex); | |
3429 | if (renew_caps) | |
3430 | send_renew_caps(mdsc, s); | |
3431 | else | |
3432 | ceph_con_keepalive(&s->s_con); | |
aab53dd9 SW |
3433 | if (s->s_state == CEPH_MDS_SESSION_OPEN || |
3434 | s->s_state == CEPH_MDS_SESSION_HUNG) | |
3d7ded4d | 3435 | ceph_send_cap_releases(mdsc, s); |
2f2dc053 SW |
3436 | mutex_unlock(&s->s_mutex); |
3437 | ceph_put_mds_session(s); | |
3438 | ||
3439 | mutex_lock(&mdsc->mutex); | |
3440 | } | |
3441 | mutex_unlock(&mdsc->mutex); | |
3442 | ||
3443 | schedule_delayed(mdsc); | |
3444 | } | |
3445 | ||
3d14c5d2 | 3446 | int ceph_mdsc_init(struct ceph_fs_client *fsc) |
2f2dc053 | 3448 | { |
3d14c5d2 YS |
3449 | struct ceph_mds_client *mdsc; |
3450 | ||
3451 | mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); | |
3452 | if (!mdsc) | |
3453 | return -ENOMEM; | |
3454 | mdsc->fsc = fsc; | |
3455 | fsc->mdsc = mdsc; | |
2f2dc053 SW |
3456 | mutex_init(&mdsc->mutex); |
3457 | mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); | |
fb3101b6 | 3458 | if (mdsc->mdsmap == NULL) { |
3459 | kfree(mdsc); | |
2d06eeb8 | 3460 | return -ENOMEM; |
fb3101b6 | 3461 | } |
2d06eeb8 | 3462 | |
2f2dc053 | 3463 | init_completion(&mdsc->safe_umount_waiters); |
f3c60c59 | 3464 | init_waitqueue_head(&mdsc->session_close_wq); |
2f2dc053 SW |
3465 | INIT_LIST_HEAD(&mdsc->waiting_for_map); |
3466 | mdsc->sessions = NULL; | |
86d8f67b | 3467 | atomic_set(&mdsc->num_sessions, 0); |
2f2dc053 SW |
3468 | mdsc->max_sessions = 0; |
3469 | mdsc->stopping = 0; | |
affbc19a | 3470 | mdsc->last_snap_seq = 0; |
2f2dc053 | 3471 | init_rwsem(&mdsc->snap_rwsem); |
a105f00c | 3472 | mdsc->snap_realms = RB_ROOT; |
2f2dc053 SW |
3473 | INIT_LIST_HEAD(&mdsc->snap_empty); |
3474 | spin_lock_init(&mdsc->snap_empty_lock); | |
3475 | mdsc->last_tid = 0; | |
e8a7b8b1 | 3476 | mdsc->oldest_tid = 0; |
44ca18f2 | 3477 | mdsc->request_tree = RB_ROOT; |
2f2dc053 SW |
3478 | INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); |
3479 | mdsc->last_renew_caps = jiffies; | |
3480 | INIT_LIST_HEAD(&mdsc->cap_delay_list); | |
3481 | spin_lock_init(&mdsc->cap_delay_lock); | |
3482 | INIT_LIST_HEAD(&mdsc->snap_flush_list); | |
3483 | spin_lock_init(&mdsc->snap_flush_lock); | |
553adfd9 | 3484 | mdsc->last_cap_flush_tid = 1; |
8310b089 | 3485 | mdsc->cap_flush_tree = RB_ROOT; |
2f2dc053 | 3486 | INIT_LIST_HEAD(&mdsc->cap_dirty); |
db354052 | 3487 | INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); |
2f2dc053 SW |
3488 | mdsc->num_cap_flushing = 0; |
3489 | spin_lock_init(&mdsc->cap_dirty_lock); | |
3490 | init_waitqueue_head(&mdsc->cap_flushing_wq); | |
3491 | spin_lock_init(&mdsc->dentry_lru_lock); | |
3492 | INIT_LIST_HEAD(&mdsc->dentry_lru); | |
2d06eeb8 | 3493 | |
37151668 | 3494 | ceph_caps_init(mdsc); |
3d14c5d2 | 3495 | ceph_adjust_min_caps(mdsc, fsc->min_caps); |
37151668 | 3496 | |
10183a69 YZ |
3497 | init_rwsem(&mdsc->pool_perm_rwsem); |
3498 | mdsc->pool_perm_tree = RB_ROOT; | |
3499 | ||
5f44f142 | 3500 | return 0; |
2f2dc053 SW |
3501 | } |
3502 | ||
3503 | /* | |
3504 | * Wait for safe replies on open mds requests. If we time out, drop | |
3505 | * all requests from the tree to avoid dangling dentry refs. | |
3506 | */ | |
3507 | static void wait_requests(struct ceph_mds_client *mdsc) | |
3508 | { | |
a319bf56 | 3509 | struct ceph_options *opts = mdsc->fsc->client->options; |
2f2dc053 | 3510 | struct ceph_mds_request *req; |
2f2dc053 SW |
3511 | |
3512 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3513 | if (__get_oldest_req(mdsc)) { |
2f2dc053 | 3514 | mutex_unlock(&mdsc->mutex); |
44ca18f2 | 3515 | |
2f2dc053 SW |
3516 | dout("wait_requests waiting for requests\n"); |
3517 | wait_for_completion_timeout(&mdsc->safe_umount_waiters, | |
a319bf56 | 3518 | ceph_timeout_jiffies(opts->mount_timeout)); |
2f2dc053 SW |
3519 | |
3520 | /* tear down remaining requests */ | |
44ca18f2 SW |
3521 | mutex_lock(&mdsc->mutex); |
3522 | while ((req = __get_oldest_req(mdsc))) { | |
2f2dc053 SW |
3523 | dout("wait_requests timed out on tid %llu\n", |
3524 | req->r_tid); | |
44ca18f2 | 3525 | __unregister_request(mdsc, req); |
2f2dc053 SW |
3526 | } |
3527 | } | |
3528 | mutex_unlock(&mdsc->mutex); | |
3529 | dout("wait_requests done\n"); | |
3530 | } | |
3531 | ||
3532 | /* | |
3533 | * called before mount is ro, and before dentries are torn down. | |
3534 | * (hmm, does this still race with new lookups?) | |
3535 | */ | |
3536 | void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) | |
3537 | { | |
3538 | dout("pre_umount\n"); | |
3539 | mdsc->stopping = 1; | |
3540 | ||
3541 | drop_leases(mdsc); | |
afcdaea3 | 3542 | ceph_flush_dirty_caps(mdsc); |
2f2dc053 | 3543 | wait_requests(mdsc); |
17c688c3 SW |
3544 | |
3545 | /* | |
3546 | * wait for reply handlers to drop their request refs and | |
3547 | * their inode/dcache refs | |
3548 | */ | |
3549 | ceph_msgr_flush(); | |
2f2dc053 SW |
3550 | } |
3551 | ||
3552 | /* | |
3553 | * wait for all write mds requests to flush. | |
3554 | */ | |
3555 | static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) | |
3556 | { | |
80fc7314 | 3557 | struct ceph_mds_request *req = NULL, *nextreq; |
44ca18f2 | 3558 | struct rb_node *n; |
2f2dc053 SW |
3559 | |
3560 | mutex_lock(&mdsc->mutex); | |
3561 | dout("wait_unsafe_requests want %lld\n", want_tid); | |
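	/*
	 * [Added note] We cannot hold mdsc->mutex across the blocking
	 * wait below, so the loop pins both the current and the next
	 * request with a reference, drops the mutex, sleeps, and then
	 * re-validates: if the pinned next request was unregistered
	 * from the tree in the meantime (RB_EMPTY_NODE), we restart
	 * the scan from the oldest tid.
	 */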
80fc7314 | 3562 | restart: |
44ca18f2 SW |
3563 | req = __get_oldest_req(mdsc); |
3564 | while (req && req->r_tid <= want_tid) { | |
80fc7314 SW |
3565 | /* find next request */ |
3566 | n = rb_next(&req->r_node); | |
3567 | if (n) | |
3568 | nextreq = rb_entry(n, struct ceph_mds_request, r_node); | |
3569 | else | |
3570 | nextreq = NULL; | |
e8a7b8b1 YZ |
3571 | if (req->r_op != CEPH_MDS_OP_SETFILELOCK && |
3572 | (req->r_op & CEPH_MDS_OP_WRITE)) { | |
44ca18f2 SW |
3573 | /* write op */ |
3574 | ceph_mdsc_get_request(req); | |
80fc7314 SW |
3575 | if (nextreq) |
3576 | ceph_mdsc_get_request(nextreq); | |
44ca18f2 SW |
3577 | mutex_unlock(&mdsc->mutex); |
3578 | dout("wait_unsafe_requests wait on %llu (want %llu)\n", | |
3579 | req->r_tid, want_tid); | |
3580 | wait_for_completion(&req->r_safe_completion); | |
3581 | mutex_lock(&mdsc->mutex); | |
44ca18f2 | 3582 | ceph_mdsc_put_request(req); |
80fc7314 SW |
3583 | if (!nextreq) |
3584 | break; /* no next request existed before we waited, so we're done! */ | |
3585 | if (RB_EMPTY_NODE(&nextreq->r_node)) { | |
3586 | /* next request was removed from tree */ | |
3587 | ceph_mdsc_put_request(nextreq); | |
3588 | goto restart; | |
3589 | } | |
3590 | ceph_mdsc_put_request(nextreq); /* won't go away */ | |
44ca18f2 | 3591 | } |
80fc7314 | 3592 | req = nextreq; |
2f2dc053 SW |
3593 | } |
3594 | mutex_unlock(&mdsc->mutex); | |
3595 | dout("wait_unsafe_requests done\n"); | |
3596 | } | |
3597 | ||
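/*
 * [Added summary comment] Flush everything for sync(2)-style
 * semantics: wait for all write MDS requests up to the current tid,
 * then for the cap and snap flushes sampled below.
 */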
3598 | void ceph_mdsc_sync(struct ceph_mds_client *mdsc) | |
3599 | { | |
affbc19a | 3600 | u64 want_tid, want_flush, want_snap; |
2f2dc053 | 3601 | |
48fec5d0 | 3602 | if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
56b7cf95 SW |
3603 | return; |
3604 | ||
2f2dc053 SW |
3605 | dout("sync\n"); |
3606 | mutex_lock(&mdsc->mutex); | |
3607 | want_tid = mdsc->last_tid; | |
2f2dc053 | 3608 | mutex_unlock(&mdsc->mutex); |
2f2dc053 | 3609 | |
afcdaea3 | 3610 | ceph_flush_dirty_caps(mdsc); |
d3383a8e | 3611 | spin_lock(&mdsc->cap_dirty_lock); |
8310b089 | 3612 | want_flush = mdsc->last_cap_flush_tid; |
d3383a8e YZ |
3613 | spin_unlock(&mdsc->cap_dirty_lock); |
3614 | ||
affbc19a YZ |
3615 | down_read(&mdsc->snap_rwsem); |
3616 | want_snap = mdsc->last_snap_seq; | |
3617 | up_read(&mdsc->snap_rwsem); | |
3618 | ||
3619 | dout("sync want tid %lld flush_seq %lld snap_seq %lld\n", | |
3620 | want_tid, want_flush, want_snap); | |
2f2dc053 SW |
3621 | |
3622 | wait_unsafe_requests(mdsc, want_tid); | |
affbc19a | 3623 | wait_caps_flush(mdsc, want_flush, want_snap); |
2f2dc053 SW |
3624 | } |
3625 | ||
f3c60c59 SW |
3626 | /* |
3627 | * true if all sessions are closed, or we force unmount | |
3628 | */ | |
7fd7d101 | 3629 | static bool done_closing_sessions(struct ceph_mds_client *mdsc) |
f3c60c59 | 3630 | { |
48fec5d0 | 3631 | if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) |
f3c60c59 | 3632 | return true; |
86d8f67b | 3633 | return atomic_read(&mdsc->num_sessions) == 0; |
f3c60c59 | 3634 | } |
2f2dc053 SW |
3635 | |
3636 | /* | |
3637 | * called after sb is ro. | |
3638 | */ | |
3639 | void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) | |
3640 | { | |
a319bf56 | 3641 | struct ceph_options *opts = mdsc->fsc->client->options; |
2f2dc053 SW |
3642 | struct ceph_mds_session *session; |
3643 | int i; | |
2f2dc053 SW |
3644 | |
3645 | dout("close_sessions\n"); | |
3646 | ||
2f2dc053 | 3647 | /* close sessions */ |
f3c60c59 SW |
3648 | mutex_lock(&mdsc->mutex); |
3649 | for (i = 0; i < mdsc->max_sessions; i++) { | |
3650 | session = __ceph_lookup_mds_session(mdsc, i); | |
3651 | if (!session) | |
3652 | continue; | |
2f2dc053 | 3653 | mutex_unlock(&mdsc->mutex); |
f3c60c59 SW |
3654 | mutex_lock(&session->s_mutex); |
3655 | __close_session(mdsc, session); | |
3656 | mutex_unlock(&session->s_mutex); | |
3657 | ceph_put_mds_session(session); | |
2f2dc053 SW |
3658 | mutex_lock(&mdsc->mutex); |
3659 | } | |
f3c60c59 SW |
3660 | mutex_unlock(&mdsc->mutex); |
3661 | ||
3662 | dout("waiting for sessions to close\n"); | |
3663 | wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc), | |
a319bf56 | 3664 | ceph_timeout_jiffies(opts->mount_timeout)); |
2f2dc053 SW |
3665 | |
3666 | /* tear down remaining sessions */ | |
f3c60c59 | 3667 | mutex_lock(&mdsc->mutex); |
2f2dc053 SW |
3668 | for (i = 0; i < mdsc->max_sessions; i++) { |
3669 | if (mdsc->sessions[i]) { | |
3670 | session = get_session(mdsc->sessions[i]); | |
2600d2dd | 3671 | __unregister_session(mdsc, session); |
2f2dc053 SW |
3672 | mutex_unlock(&mdsc->mutex); |
3673 | mutex_lock(&session->s_mutex); | |
3674 | remove_session_caps(session); | |
3675 | mutex_unlock(&session->s_mutex); | |
3676 | ceph_put_mds_session(session); | |
3677 | mutex_lock(&mdsc->mutex); | |
3678 | } | |
3679 | } | |
2f2dc053 | 3680 | WARN_ON(!list_empty(&mdsc->cap_delay_list)); |
2f2dc053 SW |
3681 | mutex_unlock(&mdsc->mutex); |
3682 | ||
3683 | ceph_cleanup_empty_realms(mdsc); | |
3684 | ||
3685 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3686 | ||
3687 | dout("stopped\n"); | |
3688 | } | |
3689 | ||
48fec5d0 YZ |
3690 | void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) |
3691 | { | |
3692 | struct ceph_mds_session *session; | |
3693 | int mds; | |
3694 | ||
3695 | dout("force umount\n"); | |
3696 | ||
3697 | mutex_lock(&mdsc->mutex); | |
3698 | for (mds = 0; mds < mdsc->max_sessions; mds++) { | |
3699 | session = __ceph_lookup_mds_session(mdsc, mds); | |
3700 | if (!session) | |
3701 | continue; | |
3702 | mutex_unlock(&mdsc->mutex); | |
3703 | mutex_lock(&session->s_mutex); | |
3704 | __close_session(mdsc, session); | |
3705 | if (session->s_state == CEPH_MDS_SESSION_CLOSING) { | |
3706 | cleanup_session_requests(mdsc, session); | |
3707 | remove_session_caps(session); | |
3708 | } | |
3709 | mutex_unlock(&session->s_mutex); | |
3710 | ceph_put_mds_session(session); | |
3711 | mutex_lock(&mdsc->mutex); | |
3712 | kick_requests(mdsc, mds); | |
3713 | } | |
3714 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
3715 | mutex_unlock(&mdsc->mutex); | |
3716 | } | |
3717 | ||
3d14c5d2 | 3718 | static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) |
2f2dc053 SW |
3719 | { |
3720 | dout("stop\n"); | |
3721 | cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ | |
3722 | if (mdsc->mdsmap) | |
3723 | ceph_mdsmap_destroy(mdsc->mdsmap); | |
3724 | kfree(mdsc->sessions); | |
37151668 | 3725 | ceph_caps_finalize(mdsc); |
10183a69 | 3726 | ceph_pool_perm_destroy(mdsc); |
2f2dc053 SW |
3727 | } |
3728 | ||
3d14c5d2 YS |
3729 | void ceph_mdsc_destroy(struct ceph_fs_client *fsc) |
3730 | { | |
3731 | struct ceph_mds_client *mdsc = fsc->mdsc; | |
3732 | ||
ef550f6f | 3733 | dout("mdsc_destroy %p\n", mdsc); |
3d14c5d2 | 3734 | ceph_mdsc_stop(mdsc); |
ef550f6f SW |
3735 | |
3736 | /* flush out any connection work with references to us */ | |
3737 | ceph_msgr_flush(); | |
3738 | ||
3d14c5d2 YS |
3739 | fsc->mdsc = NULL; |
3740 | kfree(mdsc); | |
ef550f6f | 3741 | dout("mdsc_destroy %p done\n", mdsc); |
3d14c5d2 YS |
3742 | } |
3743 | ||
2f2dc053 SW |
3744 | |
3745 | /* | |
3746 | * handle mds map update. | |
3747 | */ | |
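/*
 * [Added sketch] The front of the MDS map message decoded below is:
 *
 *	struct ceph_fsid fsid;
 *	__le32 epoch;
 *	__le32 maplen;
 *	u8 map[maplen];		handed to ceph_mdsmap_decode()
 */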
3748 | void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) | |
3749 | { | |
3750 | u32 epoch; | |
3751 | u32 maplen; | |
3752 | void *p = msg->front.iov_base; | |
3753 | void *end = p + msg->front.iov_len; | |
3754 | struct ceph_mdsmap *newmap, *oldmap; | |
3755 | struct ceph_fsid fsid; | |
3756 | int err = -EINVAL; | |
3757 | ||
3758 | ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); | |
3759 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
3d14c5d2 | 3760 | if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) |
0743304d | 3761 | return; |
c89136ea SW |
3762 | epoch = ceph_decode_32(&p); |
3763 | maplen = ceph_decode_32(&p); | |
2f2dc053 SW |
3764 | dout("handle_map epoch %u len %d\n", epoch, (int)maplen); |
3765 | ||
3766 | /* do we need it? */ | |
3d14c5d2 | 3767 | ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); |
2f2dc053 SW |
3768 | mutex_lock(&mdsc->mutex); |
3769 | if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { | |
3770 | dout("handle_map epoch %u <= our %u\n", | |
3771 | epoch, mdsc->mdsmap->m_epoch); | |
3772 | mutex_unlock(&mdsc->mutex); | |
3773 | return; | |
3774 | } | |
3775 | ||
3776 | newmap = ceph_mdsmap_decode(&p, end); | |
3777 | if (IS_ERR(newmap)) { | |
3778 | err = PTR_ERR(newmap); | |
3779 | goto bad_unlock; | |
3780 | } | |
3781 | ||
3782 | /* swap into place */ | |
3783 | if (mdsc->mdsmap) { | |
3784 | oldmap = mdsc->mdsmap; | |
3785 | mdsc->mdsmap = newmap; | |
3786 | check_new_map(mdsc, newmap, oldmap); | |
3787 | ceph_mdsmap_destroy(oldmap); | |
3788 | } else { | |
3789 | mdsc->mdsmap = newmap; /* first mds map */ | |
3790 | } | |
3d14c5d2 | 3791 | mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; |
2f2dc053 SW |
3792 | |
3793 | __wake_requests(mdsc, &mdsc->waiting_for_map); | |
3794 | ||
3795 | mutex_unlock(&mdsc->mutex); | |
3796 | schedule_delayed(mdsc); | |
3797 | return; | |
3798 | ||
3799 | bad_unlock: | |
3800 | mutex_unlock(&mdsc->mutex); | |
3801 | bad: | |
3802 | pr_err("error decoding mdsmap %d\n", err); | |
3803 | return; | |
3804 | } | |
3805 | ||
3806 | static struct ceph_connection *con_get(struct ceph_connection *con) | |
3807 | { | |
3808 | struct ceph_mds_session *s = con->private; | |
3809 | ||
3810 | if (get_session(s)) { | |
2600d2dd | 3811 | dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref)); |
2f2dc053 SW |
3812 | return con; |
3813 | } | |
3814 | dout("mdsc con_get %p FAIL\n", s); | |
3815 | return NULL; | |
3816 | } | |
3817 | ||
3818 | static void con_put(struct ceph_connection *con) | |
3819 | { | |
3820 | struct ceph_mds_session *s = con->private; | |
3821 | ||
7d8e18a6 | 3822 | dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); |
2f2dc053 SW |
3823 | ceph_put_mds_session(s); |
3824 | } | |
3825 | ||
3826 | /* | |
3827 | * if the client is unresponsive for long enough, the mds will kill | |
3828 | * the session entirely. | |
3829 | */ | |
3830 | static void peer_reset(struct ceph_connection *con) | |
3831 | { | |
3832 | struct ceph_mds_session *s = con->private; | |
7e70f0ed | 3833 | struct ceph_mds_client *mdsc = s->s_mdsc; |
2f2dc053 | 3834 | |
f3ae1b97 | 3835 | pr_warn("mds%d closed our session\n", s->s_mds); |
7e70f0ed | 3836 | send_mds_reconnect(mdsc, s); |
2f2dc053 SW |
3837 | } |
3838 | ||
3839 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
3840 | { | |
3841 | struct ceph_mds_session *s = con->private; | |
3842 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3843 | int type = le16_to_cpu(msg->hdr.type); | |
3844 | ||
2600d2dd SW |
3845 | mutex_lock(&mdsc->mutex); |
3846 | if (__verify_registered_session(mdsc, s) < 0) { | |
3847 | mutex_unlock(&mdsc->mutex); | |
3848 | goto out; | |
3849 | } | |
3850 | mutex_unlock(&mdsc->mutex); | |
3851 | ||
2f2dc053 SW |
3852 | switch (type) { |
3853 | case CEPH_MSG_MDS_MAP: | |
3854 | ceph_mdsc_handle_map(mdsc, msg); | |
3855 | break; | |
3856 | case CEPH_MSG_CLIENT_SESSION: | |
3857 | handle_session(s, msg); | |
3858 | break; | |
3859 | case CEPH_MSG_CLIENT_REPLY: | |
3860 | handle_reply(s, msg); | |
3861 | break; | |
3862 | case CEPH_MSG_CLIENT_REQUEST_FORWARD: | |
2600d2dd | 3863 | handle_forward(mdsc, s, msg); |
2f2dc053 SW |
3864 | break; |
3865 | case CEPH_MSG_CLIENT_CAPS: | |
3866 | ceph_handle_caps(s, msg); | |
3867 | break; | |
3868 | case CEPH_MSG_CLIENT_SNAP: | |
2600d2dd | 3869 | ceph_handle_snap(mdsc, s, msg); |
2f2dc053 SW |
3870 | break; |
3871 | case CEPH_MSG_CLIENT_LEASE: | |
2600d2dd | 3872 | handle_lease(mdsc, s, msg); |
2f2dc053 SW |
3873 | break; |
3874 | ||
3875 | default: | |
3876 | pr_err("received unknown message type %d %s\n", type, | |
3877 | ceph_msg_type_name(type)); | |
3878 | } | |
2600d2dd | 3879 | out: |
2f2dc053 SW |
3880 | ceph_msg_put(msg); |
3881 | } | |
3882 | ||
4e7a5dcd SW |
3883 | /* |
3884 | * authentication | |
3885 | */ | |
a3530df3 AE |
3886 | |
3887 | /* | |
3888 | * Note: returned pointer is the address of a structure that's | |
3889 | * managed separately. Caller must *not* attempt to free it. | |
3890 | */ | |
3891 | static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |
8f43fb53 | 3892 | int *proto, int force_new) |
4e7a5dcd SW |
3893 | { |
3894 | struct ceph_mds_session *s = con->private; | |
3895 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 3896 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
74f1869f | 3897 | struct ceph_auth_handshake *auth = &s->s_auth; |
4e7a5dcd | 3898 | |
74f1869f | 3899 | if (force_new && auth->authorizer) { |
27859f97 | 3900 | ceph_auth_destroy_authorizer(ac, auth->authorizer); |
74f1869f | 3901 | auth->authorizer = NULL; |
4e7a5dcd | 3902 | } |
27859f97 SW |
3903 | if (!auth->authorizer) { |
3904 | int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
3905 | auth); | |
0bed9b5c SW |
3906 | if (ret) |
3907 | return ERR_PTR(ret); | |
27859f97 SW |
3908 | } else { |
3909 | int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, | |
3910 | auth); | |
a255651d | 3911 | if (ret) |
a3530df3 | 3912 | return ERR_PTR(ret); |
4e7a5dcd | 3913 | } |
4e7a5dcd | 3914 | *proto = ac->protocol; |
74f1869f | 3915 | |
a3530df3 | 3916 | return auth; |
4e7a5dcd SW |
3917 | } |
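/*
 * [Added note] The handshake kept in s->s_auth is created once per
 * session and refreshed via ceph_auth_update_authorizer() on later
 * connection attempts; force_new (e.g. after the server rejects the
 * old authorizer) destroys it so a fresh one is built.
 */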
3918 | ||
3919 | ||
3920 | static int verify_authorizer_reply(struct ceph_connection *con, int len) | |
3921 | { | |
3922 | struct ceph_mds_session *s = con->private; | |
3923 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 3924 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
4e7a5dcd | 3925 | |
27859f97 | 3926 | return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len); |
4e7a5dcd SW |
3927 | } |
3928 | ||
9bd2e6f8 SW |
3929 | static int invalidate_authorizer(struct ceph_connection *con) |
3930 | { | |
3931 | struct ceph_mds_session *s = con->private; | |
3932 | struct ceph_mds_client *mdsc = s->s_mdsc; | |
3d14c5d2 | 3933 | struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; |
9bd2e6f8 | 3934 | |
27859f97 | 3935 | ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); |
9bd2e6f8 | 3936 | |
3d14c5d2 | 3937 | return ceph_monc_validate_auth(&mdsc->fsc->client->monc); |
9bd2e6f8 SW |
3938 | } |
3939 | ||
53ded495 AE |
3940 | static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, |
3941 | struct ceph_msg_header *hdr, int *skip) | |
3942 | { | |
3943 | struct ceph_msg *msg; | |
3944 | int type = (int) le16_to_cpu(hdr->type); | |
3945 | int front_len = (int) le32_to_cpu(hdr->front_len); | |
3946 | ||
3947 | if (con->in_msg) | |
3948 | return con->in_msg; | |
3949 | ||
3950 | *skip = 0; | |
3951 | msg = ceph_msg_new(type, front_len, GFP_NOFS, false); | |
3952 | if (!msg) { | |
3953 | pr_err("unable to allocate msg type %d len %d\n", | |
3954 | type, front_len); | |
3955 | return NULL; | |
3956 | } | |
53ded495 AE |
3957 | |
3958 | return msg; | |
3959 | } | |
3960 | ||
79dbd1ba | 3961 | static int mds_sign_message(struct ceph_msg *msg) |
33d07337 | 3962 | { |
79dbd1ba | 3963 | struct ceph_mds_session *s = msg->con->private; |
33d07337 | 3964 | struct ceph_auth_handshake *auth = &s->s_auth; |
79dbd1ba | 3965 | |
33d07337 YZ |
3966 | return ceph_auth_sign_message(auth, msg); |
3967 | } | |
3968 | ||
79dbd1ba | 3969 | static int mds_check_message_signature(struct ceph_msg *msg) |
33d07337 | 3970 | { |
79dbd1ba | 3971 | struct ceph_mds_session *s = msg->con->private; |
33d07337 | 3972 | struct ceph_auth_handshake *auth = &s->s_auth; |
79dbd1ba | 3973 | |
33d07337 YZ |
3974 | return ceph_auth_check_message_signature(auth, msg); |
3975 | } | |
3976 | ||
9e32789f | 3977 | static const struct ceph_connection_operations mds_con_ops = { |
2f2dc053 SW |
3978 | .get = con_get, |
3979 | .put = con_put, | |
3980 | .dispatch = dispatch, | |
4e7a5dcd SW |
3981 | .get_authorizer = get_authorizer, |
3982 | .verify_authorizer_reply = verify_authorizer_reply, | |
9bd2e6f8 | 3983 | .invalidate_authorizer = invalidate_authorizer, |
2f2dc053 | 3984 | .peer_reset = peer_reset, |
53ded495 | 3985 | .alloc_msg = mds_alloc_msg, |
79dbd1ba ID |
3986 | .sign_message = mds_sign_message, |
3987 | .check_message_signature = mds_check_message_signature, | |
2f2dc053 SW |
3988 | }; |
3989 | ||
2f2dc053 | 3990 | /* eof */ |