net/ceph/mon_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/slab.h>
7 #include <linux/random.h>
8 #include <linux/sched.h>
9
10 #include <linux/ceph/ceph_features.h>
11 #include <linux/ceph/mon_client.h>
12 #include <linux/ceph/libceph.h>
13 #include <linux/ceph/debugfs.h>
14 #include <linux/ceph/decode.h>
15 #include <linux/ceph/auth.h>
16
17 /*
18  * Interact with the Ceph monitor cluster.  Handle requests for new map
19 * versions, and periodically resend as needed. Also implement
20 * statfs() and umount().
21 *
22  * A small cluster of Ceph "monitors" is responsible for managing critical
23  * cluster configuration and state information.  An odd number (e.g., 3, 5)
24  * of ceph-mon daemons use a modified version of the Paxos part-time parliament
25 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
26 * list of clients who have mounted the file system.
27 *
28 * We maintain an open, active session with a monitor at all times in order to
29 * receive timely MDSMap updates. We periodically send a keepalive byte on the
30 * TCP socket to ensure we detect a failure. If the connection does break, we
31 * randomly hunt for a new monitor. Once the connection is reestablished, we
32 * resend any outstanding requests.
33 */
34
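/*
 * A rough sketch of how a libceph consumer drives this client.  The
 * real call sites live elsewhere (e.g. ceph_create_client() and
 * __ceph_open_session() in net/ceph/ceph_common.c), so treat the
 * snippet as illustrative only:
 *
 *	err = ceph_monc_init(&client->monc, client);
 *	err = ceph_monc_open_session(&client->monc);
 *	err = ceph_monc_wait_osdmap(&client->monc, epoch, timeout);
 *	...
 *	ceph_monc_stop(&client->monc);
 */
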
35 static const struct ceph_connection_operations mon_con_ops;
36
37 static int __validate_auth(struct ceph_mon_client *monc);
38
39 /*
40 * Decode a monmap blob (e.g., during mount).
41 */
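/*
 * Wire layout as consumed below (a description of this decode path,
 * not authoritative protocol documentation): __le32 blob length,
 * __le16 version (skipped), struct ceph_fsid, __le32 epoch,
 * __le32 num_mon, then num_mon struct ceph_entity_inst entries whose
 * addresses are fixed up via ceph_decode_addr().
 */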
42 struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
43 {
44 struct ceph_monmap *m = NULL;
45 int i, err = -EINVAL;
46 struct ceph_fsid fsid;
47 u32 epoch, num_mon;
48 u32 len;
49
50 ceph_decode_32_safe(&p, end, len, bad);
51 ceph_decode_need(&p, end, len, bad);
52
53 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
54 p += sizeof(u16); /* skip version */
55
56 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
57 ceph_decode_copy(&p, &fsid, sizeof(fsid));
58 epoch = ceph_decode_32(&p);
59
60 num_mon = ceph_decode_32(&p);
61 ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);
62
63 if (num_mon >= CEPH_MAX_MON)
64 goto bad;
65 m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
66 if (m == NULL)
67 return ERR_PTR(-ENOMEM);
68 m->fsid = fsid;
69 m->epoch = epoch;
70 m->num_mon = num_mon;
71 ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
72 for (i = 0; i < num_mon; i++)
73 ceph_decode_addr(&m->mon_inst[i].addr);
74
75 dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
76 m->num_mon);
77 for (i = 0; i < m->num_mon; i++)
78 dout("monmap_decode mon%d is %s\n", i,
79 ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
80 return m;
81
82 bad:
83 dout("monmap_decode failed with %d\n", err);
84 kfree(m);
85 return ERR_PTR(err);
86 }
87
88 /*
89 * return true if *addr is included in the monmap.
90 */
91 int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
92 {
93 int i;
94
95 for (i = 0; i < m->num_mon; i++)
96 if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
97 return 1;
98 return 0;
99 }
100
101 /*
102 * Send an auth request.
103 */
104 static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
105 {
106 monc->pending_auth = 1;
107 monc->m_auth->front.iov_len = len;
108 monc->m_auth->hdr.front_len = cpu_to_le32(len);
109 ceph_msg_revoke(monc->m_auth);
110 ceph_msg_get(monc->m_auth); /* keep our ref */
111 ceph_con_send(&monc->con, monc->m_auth);
112 }
113
114 /*
115 * Close monitor session, if any.
116 */
117 static void __close_session(struct ceph_mon_client *monc)
118 {
119 dout("__close_session closing mon%d\n", monc->cur_mon);
120 ceph_msg_revoke(monc->m_auth);
121 ceph_msg_revoke_incoming(monc->m_auth_reply);
122 ceph_msg_revoke(monc->m_subscribe);
123 ceph_msg_revoke_incoming(monc->m_subscribe_ack);
124 ceph_con_close(&monc->con);
125
126 monc->pending_auth = 0;
127 ceph_auth_reset(monc->auth);
128 }
129
130 /*
131 * Pick a new monitor at random and set cur_mon. If we are repicking
132 * (i.e. cur_mon is already set), be sure to pick a different one.
133 */
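/*
 * For example, with num_mon == 3 and cur_mon == 1, n is drawn
 * uniformly from {0, 1} and any value >= cur_mon is bumped by one,
 * so the replacement is picked uniformly from {0, 2}.
 */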
134 static void pick_new_mon(struct ceph_mon_client *monc)
135 {
136 int old_mon = monc->cur_mon;
137
138 BUG_ON(monc->monmap->num_mon < 1);
139
140 if (monc->monmap->num_mon == 1) {
141 monc->cur_mon = 0;
142 } else {
143 int max = monc->monmap->num_mon;
144 int o = -1;
145 int n;
146
147 if (monc->cur_mon >= 0) {
148 if (monc->cur_mon < monc->monmap->num_mon)
149 o = monc->cur_mon;
150 if (o >= 0)
151 max--;
152 }
153
154 n = prandom_u32() % max;
155 if (o >= 0 && n >= o)
156 n++;
157
158 monc->cur_mon = n;
159 }
160
161 dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
162 monc->cur_mon, monc->monmap->num_mon);
163 }
164
165 /*
166 * Open a session with a new monitor.
167 */
168 static void __open_session(struct ceph_mon_client *monc)
169 {
170 int ret;
171
172 pick_new_mon(monc);
173
174 monc->hunting = true;
175 if (monc->had_a_connection) {
176 monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
177 if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
178 monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
179 }
180
181 monc->sub_renew_after = jiffies; /* i.e., expired */
182 monc->sub_renew_sent = 0;
183
184 dout("%s opening mon%d\n", __func__, monc->cur_mon);
185 ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
186 &monc->monmap->mon_inst[monc->cur_mon].addr);
187
188 /*
189 * send an initial keepalive to ensure our timestamp is valid
190 * by the time we are in an OPENED state
191 */
192 ceph_con_keepalive(&monc->con);
193
194 /* initiate authentication handshake */
195 ret = ceph_auth_build_hello(monc->auth,
196 monc->m_auth->front.iov_base,
197 monc->m_auth->front_alloc_len);
198 BUG_ON(ret <= 0);
199 __send_prepared_auth_request(monc, ret);
200 }
201
202 static void reopen_session(struct ceph_mon_client *monc)
203 {
204 if (!monc->hunting)
205 pr_info("mon%d %s session lost, hunting for new mon\n",
206 monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr.in_addr));
207
208 __close_session(monc);
209 __open_session(monc);
210 }
211
212 static void un_backoff(struct ceph_mon_client *monc)
213 {
214 monc->hunt_mult /= 2; /* reduce by 50% */
215 if (monc->hunt_mult < 1)
216 monc->hunt_mult = 1;
217 dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
218 }
219
220 /*
221 * Reschedule delayed work timer.
222 */
223 static void __schedule_delayed(struct ceph_mon_client *monc)
224 {
225 unsigned long delay;
226
227 if (monc->hunting)
228 delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
229 else
230 delay = CEPH_MONC_PING_INTERVAL;
231
232 dout("__schedule_delayed after %lu\n", delay);
233 mod_delayed_work(system_wq, &monc->delayed_work,
234 round_jiffies_relative(delay));
235 }
236
237 const char *ceph_sub_str[] = {
238 [CEPH_SUB_MONMAP] = "monmap",
239 [CEPH_SUB_OSDMAP] = "osdmap",
240 [CEPH_SUB_FSMAP] = "fsmap.user",
241 [CEPH_SUB_MDSMAP] = "mdsmap",
242 };
243
244 /*
245 * Send subscribe request for one or more maps, according to
246 * monc->subs.
247 */
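/*
 * The encoded payload (as built below, not a protocol spec) is a
 * __le32 count followed, for each wanted sub, by a length-prefixed
 * name ("monmap", "osdmap", "mdsmap.<fscid>", ...) and a copy of
 * struct ceph_mon_subscribe_item { __le64 start; __u8 flags; }.
 */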
248 static void __send_subscribe(struct ceph_mon_client *monc)
249 {
250 struct ceph_msg *msg = monc->m_subscribe;
251 void *p = msg->front.iov_base;
252 void *const end = p + msg->front_alloc_len;
253 int num = 0;
254 int i;
255
256 dout("%s sent %lu\n", __func__, monc->sub_renew_sent);
257
258 BUG_ON(monc->cur_mon < 0);
259
260 if (!monc->sub_renew_sent)
261 monc->sub_renew_sent = jiffies | 1; /* never 0 */
262
263 msg->hdr.version = cpu_to_le16(2);
264
265 for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
266 if (monc->subs[i].want)
267 num++;
268 }
269 BUG_ON(num < 1); /* monmap sub is always there */
270 ceph_encode_32(&p, num);
271 for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
272 char buf[32];
273 int len;
274
275 if (!monc->subs[i].want)
276 continue;
277
278 len = sprintf(buf, "%s", ceph_sub_str[i]);
279 if (i == CEPH_SUB_MDSMAP &&
280 monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
281 len += sprintf(buf + len, ".%d", monc->fs_cluster_id);
282
283 dout("%s %s start %llu flags 0x%x\n", __func__, buf,
284 le64_to_cpu(monc->subs[i].item.start),
285 monc->subs[i].item.flags);
286 ceph_encode_string(&p, end, buf, len);
287 memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
288 p += sizeof(monc->subs[i].item);
289 }
290
291 BUG_ON(p > end);
292 msg->front.iov_len = p - msg->front.iov_base;
293 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
294 ceph_msg_revoke(msg);
295 ceph_con_send(&monc->con, ceph_msg_get(msg));
296 }
297
298 static void handle_subscribe_ack(struct ceph_mon_client *monc,
299 struct ceph_msg *msg)
300 {
301 unsigned int seconds;
302 struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
303
304 if (msg->front.iov_len < sizeof(*h))
305 goto bad;
306 seconds = le32_to_cpu(h->duration);
307
308 mutex_lock(&monc->mutex);
309 if (monc->sub_renew_sent) {
310 /*
311 * This is only needed for legacy (infernalis or older)
312 * MONs -- see delayed_work().
313 */
314 monc->sub_renew_after = monc->sub_renew_sent +
315 (seconds >> 1) * HZ - 1;
316 dout("%s sent %lu duration %d renew after %lu\n", __func__,
317 monc->sub_renew_sent, seconds, monc->sub_renew_after);
318 monc->sub_renew_sent = 0;
319 } else {
320 dout("%s sent %lu renew after %lu, ignoring\n", __func__,
321 monc->sub_renew_sent, monc->sub_renew_after);
322 }
323 mutex_unlock(&monc->mutex);
324 return;
325 bad:
326 pr_err("got corrupt subscribe-ack msg\n");
327 ceph_msg_dump(msg);
328 }
329
330 /*
331 * Register interest in a map
332 *
333 * @sub: one of CEPH_SUB_*
334 * @epoch: X for "every map since X", or 0 for "just the latest"
335 */
336 static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
337 u32 epoch, bool continuous)
338 {
339 __le64 start = cpu_to_le64(epoch);
340 u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;
341
342 dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
343 epoch, continuous);
344
345 if (monc->subs[sub].want &&
346 monc->subs[sub].item.start == start &&
347 monc->subs[sub].item.flags == flags)
348 return false;
349
350 monc->subs[sub].item.start = start;
351 monc->subs[sub].item.flags = flags;
352 monc->subs[sub].want = true;
353
354 return true;
355 }
356
357 bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
358 bool continuous)
359 {
360 bool need_request;
361
362 mutex_lock(&monc->mutex);
363 need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
364 mutex_unlock(&monc->mutex);
365
366 return need_request;
367 }
368 EXPORT_SYMBOL(ceph_monc_want_map);
369
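/*
 * A hedged usage sketch: callers such as the osd_client follow
 * roughly this pattern (variable names here are illustrative):
 *
 *	if (ceph_monc_want_map(&client->monc, CEPH_SUB_OSDMAP,
 *			       epoch, continuous))
 *		ceph_monc_renew_subs(&client->monc);
 */
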
370 /*
371 * Keep track of which maps we have
372 *
373 * @sub: one of CEPH_SUB_*
374 */
375 static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
376 u32 epoch)
377 {
378 dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);
379
380 if (monc->subs[sub].want) {
381 if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
382 monc->subs[sub].want = false;
383 else
384 monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
385 }
386
387 monc->subs[sub].have = epoch;
388 }
389
390 void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
391 {
392 mutex_lock(&monc->mutex);
393 __ceph_monc_got_map(monc, sub, epoch);
394 mutex_unlock(&monc->mutex);
395 }
396 EXPORT_SYMBOL(ceph_monc_got_map);
397
398 void ceph_monc_renew_subs(struct ceph_mon_client *monc)
399 {
400 mutex_lock(&monc->mutex);
401 __send_subscribe(monc);
402 mutex_unlock(&monc->mutex);
403 }
404 EXPORT_SYMBOL(ceph_monc_renew_subs);
405
406 /*
407 * Wait for an osdmap with a given epoch.
408 *
409 * @epoch: epoch to wait for
410 * @timeout: in jiffies, 0 means "wait forever"
411 */
412 int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
413 unsigned long timeout)
414 {
415 unsigned long started = jiffies;
416 long ret;
417
418 mutex_lock(&monc->mutex);
419 while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
420 mutex_unlock(&monc->mutex);
421
422 if (timeout && time_after_eq(jiffies, started + timeout))
423 return -ETIMEDOUT;
424
425 ret = wait_event_interruptible_timeout(monc->client->auth_wq,
426 monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
427 ceph_timeout_jiffies(timeout));
428 if (ret < 0)
429 return ret;
430
431 mutex_lock(&monc->mutex);
432 }
433
434 mutex_unlock(&monc->mutex);
435 return 0;
436 }
437 EXPORT_SYMBOL(ceph_monc_wait_osdmap);
438
439 /*
440 * Open a session with a random monitor. Request monmap and osdmap,
441 * which are waited upon in __ceph_open_session().
442 */
443 int ceph_monc_open_session(struct ceph_mon_client *monc)
444 {
445 mutex_lock(&monc->mutex);
446 __ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
447 __ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
448 __open_session(monc);
449 __schedule_delayed(monc);
450 mutex_unlock(&monc->mutex);
451 return 0;
452 }
453 EXPORT_SYMBOL(ceph_monc_open_session);
454
455 static void ceph_monc_handle_map(struct ceph_mon_client *monc,
456 struct ceph_msg *msg)
457 {
458 struct ceph_client *client = monc->client;
459 struct ceph_monmap *monmap = NULL, *old = monc->monmap;
460 void *p, *end;
461
462 mutex_lock(&monc->mutex);
463
464 dout("handle_monmap\n");
465 p = msg->front.iov_base;
466 end = p + msg->front.iov_len;
467
468 monmap = ceph_monmap_decode(p, end);
469 if (IS_ERR(monmap)) {
470 pr_err("problem decoding monmap, %d\n",
471 (int)PTR_ERR(monmap));
472 goto out;
473 }
474
475 if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
476 kfree(monmap);
477 goto out;
478 }
479
480 client->monc.monmap = monmap;
481 kfree(old);
482
483 __ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
484 client->have_fsid = true;
485
486 out:
487 mutex_unlock(&monc->mutex);
488 wake_up_all(&client->auth_wq);
489 }
490
491 /*
492  * generic requests (currently statfs, mon_get_version, mon_command)
493 */
494 DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)
495
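/*
 * Lifecycle of a generic request (a summary of the helpers below):
 * alloc_generic_request() -> register_generic_request(), which
 * assigns a tid and inserts the request into the tid-indexed rbtree
 * under monc->mutex -> send_generic_request().  The reply is matched
 * by tid in dispatch and completes the request; on cancel, the
 * in-flight messages are revoked and the tree reference is dropped.
 */
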
496 static void release_generic_request(struct kref *kref)
497 {
498 struct ceph_mon_generic_request *req =
499 container_of(kref, struct ceph_mon_generic_request, kref);
500
501 dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
502 req->reply);
503 WARN_ON(!RB_EMPTY_NODE(&req->node));
504
505 if (req->reply)
506 ceph_msg_put(req->reply);
507 if (req->request)
508 ceph_msg_put(req->request);
509
510 kfree(req);
511 }
512
513 static void put_generic_request(struct ceph_mon_generic_request *req)
514 {
515 if (req)
516 kref_put(&req->kref, release_generic_request);
517 }
518
519 static void get_generic_request(struct ceph_mon_generic_request *req)
520 {
521 kref_get(&req->kref);
522 }
523
524 static struct ceph_mon_generic_request *
525 alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
526 {
527 struct ceph_mon_generic_request *req;
528
529 req = kzalloc(sizeof(*req), gfp);
530 if (!req)
531 return NULL;
532
533 req->monc = monc;
534 kref_init(&req->kref);
535 RB_CLEAR_NODE(&req->node);
536 init_completion(&req->completion);
537
538 dout("%s greq %p\n", __func__, req);
539 return req;
540 }
541
542 static void register_generic_request(struct ceph_mon_generic_request *req)
543 {
544 struct ceph_mon_client *monc = req->monc;
545
546 WARN_ON(req->tid);
547
548 get_generic_request(req);
549 req->tid = ++monc->last_tid;
550 insert_generic_request(&monc->generic_request_tree, req);
551 }
552
553 static void send_generic_request(struct ceph_mon_client *monc,
554 struct ceph_mon_generic_request *req)
555 {
556 WARN_ON(!req->tid);
557
558 dout("%s greq %p tid %llu\n", __func__, req, req->tid);
559 req->request->hdr.tid = cpu_to_le64(req->tid);
560 ceph_con_send(&monc->con, ceph_msg_get(req->request));
561 }
562
563 static void __finish_generic_request(struct ceph_mon_generic_request *req)
564 {
565 struct ceph_mon_client *monc = req->monc;
566
567 dout("%s greq %p tid %llu\n", __func__, req, req->tid);
568 erase_generic_request(&monc->generic_request_tree, req);
569
570 ceph_msg_revoke(req->request);
571 ceph_msg_revoke_incoming(req->reply);
572 }
573
574 static void finish_generic_request(struct ceph_mon_generic_request *req)
575 {
576 __finish_generic_request(req);
577 put_generic_request(req);
578 }
579
580 static void complete_generic_request(struct ceph_mon_generic_request *req)
581 {
582 if (req->complete_cb)
583 req->complete_cb(req);
584 else
585 complete_all(&req->completion);
586 put_generic_request(req);
587 }
588
589 static void cancel_generic_request(struct ceph_mon_generic_request *req)
590 {
591 struct ceph_mon_client *monc = req->monc;
592 struct ceph_mon_generic_request *lookup_req;
593
594 dout("%s greq %p tid %llu\n", __func__, req, req->tid);
595
596 mutex_lock(&monc->mutex);
597 lookup_req = lookup_generic_request(&monc->generic_request_tree,
598 req->tid);
599 if (lookup_req) {
600 WARN_ON(lookup_req != req);
601 finish_generic_request(req);
602 }
603
604 mutex_unlock(&monc->mutex);
605 }
606
607 static int wait_generic_request(struct ceph_mon_generic_request *req)
608 {
609 int ret;
610
611 dout("%s greq %p tid %llu\n", __func__, req, req->tid);
612 ret = wait_for_completion_interruptible(&req->completion);
613 if (ret)
614 cancel_generic_request(req);
615 else
616 ret = req->result; /* completed */
617
618 return ret;
619 }
620
621 static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
622 struct ceph_msg_header *hdr,
623 int *skip)
624 {
625 struct ceph_mon_client *monc = con->private;
626 struct ceph_mon_generic_request *req;
627 u64 tid = le64_to_cpu(hdr->tid);
628 struct ceph_msg *m;
629
630 mutex_lock(&monc->mutex);
631 req = lookup_generic_request(&monc->generic_request_tree, tid);
632 if (!req) {
633 dout("get_generic_reply %lld dne\n", tid);
634 *skip = 1;
635 m = NULL;
636 } else {
637 dout("get_generic_reply %lld got %p\n", tid, req->reply);
638 *skip = 0;
639 m = ceph_msg_get(req->reply);
640 /*
641 * we don't need to track the connection reading into
642 * this reply because we only have one open connection
643 * at a time, ever.
644 */
645 }
646 mutex_unlock(&monc->mutex);
647 return m;
648 }
649
650 /*
651 * statfs
652 */
653 static void handle_statfs_reply(struct ceph_mon_client *monc,
654 struct ceph_msg *msg)
655 {
656 struct ceph_mon_generic_request *req;
657 struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
658 u64 tid = le64_to_cpu(msg->hdr.tid);
659
660 dout("%s msg %p tid %llu\n", __func__, msg, tid);
661
662 if (msg->front.iov_len != sizeof(*reply))
663 goto bad;
664
665 mutex_lock(&monc->mutex);
666 req = lookup_generic_request(&monc->generic_request_tree, tid);
667 if (!req) {
668 mutex_unlock(&monc->mutex);
669 return;
670 }
671
672 req->result = 0;
673 *req->u.st = reply->st; /* struct */
674 __finish_generic_request(req);
675 mutex_unlock(&monc->mutex);
676
677 complete_generic_request(req);
678 return;
679
680 bad:
681 pr_err("corrupt statfs reply, tid %llu\n", tid);
682 ceph_msg_dump(msg);
683 }
684
685 /*
686 * Do a synchronous statfs().
687 */
688 int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool,
689 struct ceph_statfs *buf)
690 {
691 struct ceph_mon_generic_request *req;
692 struct ceph_mon_statfs *h;
693 int ret = -ENOMEM;
694
695 req = alloc_generic_request(monc, GFP_NOFS);
696 if (!req)
697 goto out;
698
699 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
700 true);
701 if (!req->request)
702 goto out;
703
704 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
705 if (!req->reply)
706 goto out;
707
708 req->u.st = buf;
709 req->request->hdr.version = cpu_to_le16(2);
710
711 mutex_lock(&monc->mutex);
712 register_generic_request(req);
713 /* fill out request */
714 h = req->request->front.iov_base;
715 h->monhdr.have_version = 0;
716 h->monhdr.session_mon = cpu_to_le16(-1);
717 h->monhdr.session_mon_tid = 0;
718 h->fsid = monc->monmap->fsid;
719 h->contains_data_pool = (data_pool != CEPH_NOPOOL);
720 h->data_pool = cpu_to_le64(data_pool);
721 send_generic_request(monc, req);
722 mutex_unlock(&monc->mutex);
723
724 ret = wait_generic_request(req);
725 out:
726 put_generic_request(req);
727 return ret;
728 }
729 EXPORT_SYMBOL(ceph_monc_do_statfs);
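
/*
 * A hedged example of the synchronous call (the ceph filesystem's
 * statfs path does roughly this; variable names are illustrative):
 *
 *	struct ceph_statfs st;
 *	int err = ceph_monc_do_statfs(&client->monc, CEPH_NOPOOL, &st);
 */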
730
731 static void handle_get_version_reply(struct ceph_mon_client *monc,
732 struct ceph_msg *msg)
733 {
734 struct ceph_mon_generic_request *req;
735 u64 tid = le64_to_cpu(msg->hdr.tid);
736 void *p = msg->front.iov_base;
737 void *end = p + msg->front_alloc_len;
738 u64 handle;
739
740 dout("%s msg %p tid %llu\n", __func__, msg, tid);
741
742 ceph_decode_need(&p, end, 2*sizeof(u64), bad);
743 handle = ceph_decode_64(&p);
744 if (tid != 0 && tid != handle)
745 goto bad;
746
747 mutex_lock(&monc->mutex);
748 req = lookup_generic_request(&monc->generic_request_tree, handle);
749 if (!req) {
750 mutex_unlock(&monc->mutex);
751 return;
752 }
753
754 req->result = 0;
755 req->u.newest = ceph_decode_64(&p);
756 __finish_generic_request(req);
757 mutex_unlock(&monc->mutex);
758
759 complete_generic_request(req);
760 return;
761
762 bad:
763 pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
764 ceph_msg_dump(msg);
765 }
766
767 static struct ceph_mon_generic_request *
768 __ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
769 ceph_monc_callback_t cb, u64 private_data)
770 {
771 struct ceph_mon_generic_request *req;
772
773 req = alloc_generic_request(monc, GFP_NOIO);
774 if (!req)
775 goto err_put_req;
776
777 req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
778 sizeof(u64) + sizeof(u32) + strlen(what),
779 GFP_NOIO, true);
780 if (!req->request)
781 goto err_put_req;
782
783 req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
784 true);
785 if (!req->reply)
786 goto err_put_req;
787
788 req->complete_cb = cb;
789 req->private_data = private_data;
790
791 mutex_lock(&monc->mutex);
792 register_generic_request(req);
793 {
794 void *p = req->request->front.iov_base;
795 void *const end = p + req->request->front_alloc_len;
796
797 ceph_encode_64(&p, req->tid); /* handle */
798 ceph_encode_string(&p, end, what, strlen(what));
799 WARN_ON(p != end);
800 }
801 send_generic_request(monc, req);
802 mutex_unlock(&monc->mutex);
803
804 return req;
805
806 err_put_req:
807 put_generic_request(req);
808 return ERR_PTR(-ENOMEM);
809 }
810
811 /*
812 * Send MMonGetVersion and wait for the reply.
813 *
814 * @what: one of "mdsmap", "osdmap" or "monmap"
815 */
816 int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
817 u64 *newest)
818 {
819 struct ceph_mon_generic_request *req;
820 int ret;
821
822 req = __ceph_monc_get_version(monc, what, NULL, 0);
823 if (IS_ERR(req))
824 return PTR_ERR(req);
825
826 ret = wait_generic_request(req);
827 if (!ret)
828 *newest = req->u.newest;
829
830 put_generic_request(req);
831 return ret;
832 }
833 EXPORT_SYMBOL(ceph_monc_get_version);
834
835 /*
836  * Send MMonGetVersion without waiting for the reply.
837 *
838 * @what: one of "mdsmap", "osdmap" or "monmap"
839 */
840 int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
841 ceph_monc_callback_t cb, u64 private_data)
842 {
843 struct ceph_mon_generic_request *req;
844
845 req = __ceph_monc_get_version(monc, what, cb, private_data);
846 if (IS_ERR(req))
847 return PTR_ERR(req);
848
849 put_generic_request(req);
850 return 0;
851 }
852 EXPORT_SYMBOL(ceph_monc_get_version_async);
853
854 static void handle_command_ack(struct ceph_mon_client *monc,
855 struct ceph_msg *msg)
856 {
857 struct ceph_mon_generic_request *req;
858 void *p = msg->front.iov_base;
859 void *const end = p + msg->front_alloc_len;
860 u64 tid = le64_to_cpu(msg->hdr.tid);
861
862 dout("%s msg %p tid %llu\n", __func__, msg, tid);
863
864 ceph_decode_need(&p, end, sizeof(struct ceph_mon_request_header) +
865 sizeof(u32), bad);
866 p += sizeof(struct ceph_mon_request_header);
867
868 mutex_lock(&monc->mutex);
869 req = lookup_generic_request(&monc->generic_request_tree, tid);
870 if (!req) {
871 mutex_unlock(&monc->mutex);
872 return;
873 }
874
875 req->result = ceph_decode_32(&p);
876 __finish_generic_request(req);
877 mutex_unlock(&monc->mutex);
878
879 complete_generic_request(req);
880 return;
881
882 bad:
883 pr_err("corrupt mon_command ack, tid %llu\n", tid);
884 ceph_msg_dump(msg);
885 }
886
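/*
 * Ask the monitors to blacklist @client_addr by issuing the mon
 * command {"prefix": "osd blacklist", "blacklistop": "add",
 * "addr": "<ip>:<port>/<nonce>"} and waiting for the ack.
 */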
887 int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
888 struct ceph_entity_addr *client_addr)
889 {
890 struct ceph_mon_generic_request *req;
891 struct ceph_mon_command *h;
892 int ret = -ENOMEM;
893 int len;
894
895 req = alloc_generic_request(monc, GFP_NOIO);
896 if (!req)
897 goto out;
898
899 req->request = ceph_msg_new(CEPH_MSG_MON_COMMAND, 256, GFP_NOIO, true);
900 if (!req->request)
901 goto out;
902
903 req->reply = ceph_msg_new(CEPH_MSG_MON_COMMAND_ACK, 512, GFP_NOIO,
904 true);
905 if (!req->reply)
906 goto out;
907
908 mutex_lock(&monc->mutex);
909 register_generic_request(req);
910 h = req->request->front.iov_base;
911 h->monhdr.have_version = 0;
912 h->monhdr.session_mon = cpu_to_le16(-1);
913 h->monhdr.session_mon_tid = 0;
914 h->fsid = monc->monmap->fsid;
915 h->num_strs = cpu_to_le32(1);
916 	len = sprintf(h->str, "{ \"prefix\": \"osd blacklist\", "
917 		      "\"blacklistop\": \"add\", "
918 		      "\"addr\": \"%pISpc/%u\" }",
919 		      &client_addr->in_addr, le32_to_cpu(client_addr->nonce));
920 h->str_len = cpu_to_le32(len);
921 send_generic_request(monc, req);
922 mutex_unlock(&monc->mutex);
923
924 ret = wait_generic_request(req);
925 out:
926 put_generic_request(req);
927 return ret;
928 }
929 EXPORT_SYMBOL(ceph_monc_blacklist_add);
930
931 /*
932 * Resend pending generic requests.
933 */
934 static void __resend_generic_request(struct ceph_mon_client *monc)
935 {
936 struct ceph_mon_generic_request *req;
937 struct rb_node *p;
938
939 for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
940 req = rb_entry(p, struct ceph_mon_generic_request, node);
941 ceph_msg_revoke(req->request);
942 ceph_msg_revoke_incoming(req->reply);
943 ceph_con_send(&monc->con, ceph_msg_get(req->request));
944 }
945 }
946
947 /*
948 * Delayed work. If we haven't mounted yet, retry. Otherwise,
949 * renew/retry subscription as needed (in case it is timing out, or we
950 * got an ENOMEM). And keep the monitor connection alive.
951 */
952 static void delayed_work(struct work_struct *work)
953 {
954 struct ceph_mon_client *monc =
955 container_of(work, struct ceph_mon_client, delayed_work.work);
956
957 dout("monc delayed_work\n");
958 mutex_lock(&monc->mutex);
959 if (monc->hunting) {
960 dout("%s continuing hunt\n", __func__);
961 reopen_session(monc);
962 } else {
963 int is_auth = ceph_auth_is_authenticated(monc->auth);
964 if (ceph_con_keepalive_expired(&monc->con,
965 CEPH_MONC_PING_TIMEOUT)) {
966 dout("monc keepalive timeout\n");
967 is_auth = 0;
968 reopen_session(monc);
969 }
970
971 if (!monc->hunting) {
972 ceph_con_keepalive(&monc->con);
973 __validate_auth(monc);
974 un_backoff(monc);
975 }
976
977 if (is_auth &&
978 !(monc->con.peer_features & CEPH_FEATURE_MON_STATEFUL_SUB)) {
979 unsigned long now = jiffies;
980
981 dout("%s renew subs? now %lu renew after %lu\n",
982 __func__, now, monc->sub_renew_after);
983 if (time_after_eq(now, monc->sub_renew_after))
984 __send_subscribe(monc);
985 }
986 }
987 __schedule_delayed(monc);
988 mutex_unlock(&monc->mutex);
989 }
990
991 /*
992 * On startup, we build a temporary monmap populated with the IPs
993 * provided by mount(2).
994 */
995 static int build_initial_monmap(struct ceph_mon_client *monc)
996 {
997 struct ceph_options *opt = monc->client->options;
998 struct ceph_entity_addr *mon_addr = opt->mon_addr;
999 int num_mon = opt->num_mon;
1000 int i;
1001
1002 /* build initial monmap */
1003 monc->monmap = kzalloc(sizeof(*monc->monmap) +
1004 num_mon*sizeof(monc->monmap->mon_inst[0]),
1005 GFP_KERNEL);
1006 if (!monc->monmap)
1007 return -ENOMEM;
1008 for (i = 0; i < num_mon; i++) {
1009 monc->monmap->mon_inst[i].addr = mon_addr[i];
1010 monc->monmap->mon_inst[i].addr.nonce = 0;
1011 monc->monmap->mon_inst[i].name.type =
1012 CEPH_ENTITY_TYPE_MON;
1013 monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
1014 }
1015 monc->monmap->num_mon = num_mon;
1016 return 0;
1017 }
1018
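/*
 * Initialize the mon client: build an initial monmap from the mount
 * options, set up authentication, and preallocate the messages used
 * for the auth and subscribe exchanges.  The connection itself is
 * not opened until ceph_monc_open_session().
 */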
1019 int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
1020 {
1021 int err = 0;
1022
1023 dout("init\n");
1024 memset(monc, 0, sizeof(*monc));
1025 monc->client = cl;
1026 monc->monmap = NULL;
1027 mutex_init(&monc->mutex);
1028
1029 err = build_initial_monmap(monc);
1030 if (err)
1031 goto out;
1032
1033 /* connection */
1034 /* authentication */
1035 monc->auth = ceph_auth_init(cl->options->name,
1036 cl->options->key);
1037 if (IS_ERR(monc->auth)) {
1038 err = PTR_ERR(monc->auth);
1039 goto out_monmap;
1040 }
1041 monc->auth->want_keys =
1042 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
1043 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
1044
1045 /* msgs */
1046 err = -ENOMEM;
1047 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
1048 sizeof(struct ceph_mon_subscribe_ack),
1049 GFP_KERNEL, true);
1050 if (!monc->m_subscribe_ack)
1051 goto out_auth;
1052
1053 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128,
1054 GFP_KERNEL, true);
1055 if (!monc->m_subscribe)
1056 goto out_subscribe_ack;
1057
1058 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096,
1059 GFP_KERNEL, true);
1060 if (!monc->m_auth_reply)
1061 goto out_subscribe;
1062
1063 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_KERNEL, true);
1064 monc->pending_auth = 0;
1065 if (!monc->m_auth)
1066 goto out_auth_reply;
1067
1068 ceph_con_init(&monc->con, monc, &mon_con_ops,
1069 &monc->client->msgr);
1070
1071 monc->cur_mon = -1;
1072 monc->had_a_connection = false;
1073 monc->hunt_mult = 1;
1074
1075 INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
1076 monc->generic_request_tree = RB_ROOT;
1077 monc->last_tid = 0;
1078
1079 monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;
1080
1081 return 0;
1082
1083 out_auth_reply:
1084 ceph_msg_put(monc->m_auth_reply);
1085 out_subscribe:
1086 ceph_msg_put(monc->m_subscribe);
1087 out_subscribe_ack:
1088 ceph_msg_put(monc->m_subscribe_ack);
1089 out_auth:
1090 ceph_auth_destroy(monc->auth);
1091 out_monmap:
1092 kfree(monc->monmap);
1093 out:
1094 return err;
1095 }
1096 EXPORT_SYMBOL(ceph_monc_init);
1097
1098 void ceph_monc_stop(struct ceph_mon_client *monc)
1099 {
1100 dout("stop\n");
1101 cancel_delayed_work_sync(&monc->delayed_work);
1102
1103 mutex_lock(&monc->mutex);
1104 __close_session(monc);
1105 monc->cur_mon = -1;
1106 mutex_unlock(&monc->mutex);
1107
1108 /*
1109 * flush msgr queue before we destroy ourselves to ensure that:
1110 * - any work that references our embedded con is finished.
1111 * - any osd_client or other work that may reference an authorizer
1112 * finishes before we shut down the auth subsystem.
1113 */
1114 ceph_msgr_flush();
1115
1116 ceph_auth_destroy(monc->auth);
1117
1118 WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));
1119
1120 ceph_msg_put(monc->m_auth);
1121 ceph_msg_put(monc->m_auth_reply);
1122 ceph_msg_put(monc->m_subscribe);
1123 ceph_msg_put(monc->m_subscribe_ack);
1124
1125 kfree(monc->monmap);
1126 }
1127 EXPORT_SYMBOL(ceph_monc_stop);
1128
1129 static void finish_hunting(struct ceph_mon_client *monc)
1130 {
1131 if (monc->hunting) {
1132 dout("%s found mon%d\n", __func__, monc->cur_mon);
1133 monc->hunting = false;
1134 monc->had_a_connection = true;
1135 un_backoff(monc);
1136 __schedule_delayed(monc);
1137 }
1138 }
1139
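/*
 * Handle an auth reply: either continue the handshake (positive
 * return from ceph_handle_auth_reply()), record an authentication
 * error, or, on the first successful authentication, adopt the
 * allocated global_id as our entity name and (re)send subscriptions
 * and pending generic requests.
 */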
1140 static void handle_auth_reply(struct ceph_mon_client *monc,
1141 struct ceph_msg *msg)
1142 {
1143 int ret;
1144 int was_auth = 0;
1145
1146 mutex_lock(&monc->mutex);
1147 was_auth = ceph_auth_is_authenticated(monc->auth);
1148 monc->pending_auth = 0;
1149 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
1150 msg->front.iov_len,
1151 monc->m_auth->front.iov_base,
1152 monc->m_auth->front_alloc_len);
1153 if (ret > 0) {
1154 __send_prepared_auth_request(monc, ret);
1155 goto out;
1156 }
1157
1158 finish_hunting(monc);
1159
1160 if (ret < 0) {
1161 monc->client->auth_err = ret;
1162 } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
1163 dout("authenticated, starting session\n");
1164
1165 monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
1166 monc->client->msgr.inst.name.num =
1167 cpu_to_le64(monc->auth->global_id);
1168
1169 __send_subscribe(monc);
1170 __resend_generic_request(monc);
1171
1172 pr_info("mon%d %s session established\n", monc->cur_mon,
1173 ceph_pr_addr(&monc->con.peer_addr.in_addr));
1174 }
1175
1176 out:
1177 mutex_unlock(&monc->mutex);
1178 if (monc->client->auth_err < 0)
1179 wake_up_all(&monc->client->auth_wq);
1180 }
1181
1182 static int __validate_auth(struct ceph_mon_client *monc)
1183 {
1184 int ret;
1185
1186 if (monc->pending_auth)
1187 return 0;
1188
1189 ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
1190 monc->m_auth->front_alloc_len);
1191 if (ret <= 0)
1192 return ret; /* either an error, or no need to authenticate */
1193 __send_prepared_auth_request(monc, ret);
1194 return 0;
1195 }
1196
1197 int ceph_monc_validate_auth(struct ceph_mon_client *monc)
1198 {
1199 int ret;
1200
1201 mutex_lock(&monc->mutex);
1202 ret = __validate_auth(monc);
1203 mutex_unlock(&monc->mutex);
1204 return ret;
1205 }
1206 EXPORT_SYMBOL(ceph_monc_validate_auth);
1207
1208 /*
1209 * handle incoming message
1210 */
1211 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
1212 {
1213 struct ceph_mon_client *monc = con->private;
1214 int type = le16_to_cpu(msg->hdr.type);
1215
1216 if (!monc)
1217 return;
1218
1219 switch (type) {
1220 case CEPH_MSG_AUTH_REPLY:
1221 handle_auth_reply(monc, msg);
1222 break;
1223
1224 case CEPH_MSG_MON_SUBSCRIBE_ACK:
1225 handle_subscribe_ack(monc, msg);
1226 break;
1227
1228 case CEPH_MSG_STATFS_REPLY:
1229 handle_statfs_reply(monc, msg);
1230 break;
1231
1232 case CEPH_MSG_MON_GET_VERSION_REPLY:
1233 handle_get_version_reply(monc, msg);
1234 break;
1235
1236 case CEPH_MSG_MON_COMMAND_ACK:
1237 handle_command_ack(monc, msg);
1238 break;
1239
1240 case CEPH_MSG_MON_MAP:
1241 ceph_monc_handle_map(monc, msg);
1242 break;
1243
1244 case CEPH_MSG_OSD_MAP:
1245 ceph_osdc_handle_map(&monc->client->osdc, msg);
1246 break;
1247
1248 default:
1249 /* can the chained handler handle it? */
1250 if (monc->client->extra_mon_dispatch &&
1251 monc->client->extra_mon_dispatch(monc->client, msg) == 0)
1252 break;
1253
1254 pr_err("received unknown message type %d %s\n", type,
1255 ceph_msg_type_name(type));
1256 }
1257 ceph_msg_put(msg);
1258 }
1259
1260 /*
1261 * Allocate memory for incoming message
1262 */
1263 static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
1264 struct ceph_msg_header *hdr,
1265 int *skip)
1266 {
1267 struct ceph_mon_client *monc = con->private;
1268 int type = le16_to_cpu(hdr->type);
1269 int front_len = le32_to_cpu(hdr->front_len);
1270 struct ceph_msg *m = NULL;
1271
1272 *skip = 0;
1273
1274 switch (type) {
1275 case CEPH_MSG_MON_SUBSCRIBE_ACK:
1276 m = ceph_msg_get(monc->m_subscribe_ack);
1277 break;
1278 case CEPH_MSG_STATFS_REPLY:
1279 case CEPH_MSG_MON_COMMAND_ACK:
1280 return get_generic_reply(con, hdr, skip);
1281 case CEPH_MSG_AUTH_REPLY:
1282 m = ceph_msg_get(monc->m_auth_reply);
1283 break;
1284 case CEPH_MSG_MON_GET_VERSION_REPLY:
1285 if (le64_to_cpu(hdr->tid) != 0)
1286 return get_generic_reply(con, hdr, skip);
1287
1288 /*
1289  * Older OSDs don't set reply tid even if the original
1290 * request had a non-zero tid. Work around this weirdness
1291 * by allocating a new message.
1292 */
1293 /* fall through */
1294 case CEPH_MSG_MON_MAP:
1295 case CEPH_MSG_MDS_MAP:
1296 case CEPH_MSG_OSD_MAP:
1297 case CEPH_MSG_FS_MAP_USER:
1298 m = ceph_msg_new(type, front_len, GFP_NOFS, false);
1299 if (!m)
1300 return NULL; /* ENOMEM--return skip == 0 */
1301 break;
1302 }
1303
1304 if (!m) {
1305 pr_info("alloc_msg unknown type %d\n", type);
1306 *skip = 1;
1307 } else if (front_len > m->front_alloc_len) {
1308 pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
1309 front_len, m->front_alloc_len,
1310 (unsigned int)con->peer_name.type,
1311 le64_to_cpu(con->peer_name.num));
1312 ceph_msg_put(m);
1313 m = ceph_msg_new(type, front_len, GFP_NOFS, false);
1314 }
1315
1316 return m;
1317 }
1318
1319 /*
1320 * If the monitor connection resets, pick a new monitor and resubmit
1321 * any pending requests.
1322 */
1323 static void mon_fault(struct ceph_connection *con)
1324 {
1325 struct ceph_mon_client *monc = con->private;
1326
1327 mutex_lock(&monc->mutex);
1328 dout("%s mon%d\n", __func__, monc->cur_mon);
1329 if (monc->cur_mon >= 0) {
1330 if (!monc->hunting) {
1331 dout("%s hunting for new mon\n", __func__);
1332 reopen_session(monc);
1333 __schedule_delayed(monc);
1334 } else {
1335 dout("%s already hunting\n", __func__);
1336 }
1337 }
1338 mutex_unlock(&monc->mutex);
1339 }
1340
1341 /*
1342 * We can ignore refcounting on the connection struct, as all references
1343 * will come from the messenger workqueue, which is drained prior to
1344 * mon_client destruction.
1345 */
1346 static struct ceph_connection *con_get(struct ceph_connection *con)
1347 {
1348 return con;
1349 }
1350
1351 static void con_put(struct ceph_connection *con)
1352 {
1353 }
1354
1355 static const struct ceph_connection_operations mon_con_ops = {
1356 .get = con_get,
1357 .put = con_put,
1358 .dispatch = dispatch,
1359 .fault = mon_fault,
1360 .alloc_msg = mon_alloc_msg,
1361 };