#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/decode.h>

#include <linux/ceph/auth.h>

/*
 * Interact with Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" are responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */

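/*
 * Example (illustrative sketch only, not part of the original file): the
 * rough lifecycle a libceph user follows, assuming an already-allocated
 * struct ceph_client "client" whose options and messenger are set up:
 *
 *	err = ceph_monc_init(&client->monc, client);
 *	if (err < 0)
 *		return err;
 *	ceph_monc_open_session(&client->monc);
 *	...		(issue statfs/poolop requests, consume map updates)
 *	ceph_monc_stop(&client->monc);
 */
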
static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}
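
/*
 * For reference, the wire layout consumed by ceph_monmap_decode() above,
 * as reconstructed from the decode calls (the authoritative definition
 * lives in the Ceph encoding headers):
 *
 *	u32	len		length of the payload that follows
 *	u16	version
 *	struct ceph_fsid fsid
 *	u32	epoch
 *	u32	num_mon
 *	struct ceph_entity_inst mon_inst[num_mon]
 */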

/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}

/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_con_revoke(monc->con, monc->m_auth);
	ceph_msg_get(monc->m_auth);  /* keep our ref */
	ceph_con_send(monc->con, monc->m_auth);
}

/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_con_revoke(monc->con, monc->m_auth);
	ceph_con_close(monc->con);
	monc->cur_mon = -1;
	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}

/*
 * Open a session with a (new) monitor.
 */
static int __open_session(struct ceph_mon_client *monc)
{
	char r;
	int ret;

	if (monc->cur_mon < 0) {
		get_random_bytes(&r, 1);
		monc->cur_mon = r % monc->monmap->num_mon;
		dout("open_session num=%d r=%d -> mon%d\n",
		     monc->monmap->num_mon, r, monc->cur_mon);
		monc->sub_sent = 0;
		monc->sub_renew_after = jiffies;  /* i.e., expired */
		monc->want_next_osdmap = !!monc->want_next_osdmap;

		dout("open_session mon%d opening\n", monc->cur_mon);
		monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
		monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
		ceph_con_open(monc->con,
			      &monc->monmap->mon_inst[monc->cur_mon].addr);

		/* initiate authentication handshake */
		ret = ceph_auth_build_hello(monc->auth,
					    monc->m_auth->front.iov_base,
					    monc->m_auth->front_max);
		__send_prepared_auth_request(monc, ret);
	} else {
		dout("open_session mon%d already open\n", monc->cur_mon);
	}
	return 0;
}

static bool __sub_expired(struct ceph_mon_client *monc)
{
	return time_after_eq(jiffies, monc->sub_renew_after);
}

/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned delay;

	if (monc->cur_mon < 0 || __sub_expired(monc))
		delay = 10 * HZ;
	else
		delay = 20 * HZ;
	dout("__schedule_delayed after %u\n", delay);
	schedule_delayed_work(&monc->delayed_work, delay);
}

/*
 * Send subscribe request for mdsmap and/or osdmap.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
	     (unsigned)monc->sub_sent, __sub_expired(monc),
	     monc->want_next_osdmap);
	if ((__sub_expired(monc) && !monc->sub_sent) ||
	    monc->want_next_osdmap == 1) {
		struct ceph_msg *msg = monc->m_subscribe;
		struct ceph_mon_subscribe_item *i;
		void *p, *end;
		int num;

		p = msg->front.iov_base;
		end = p + msg->front_max;

		num = 1 + !!monc->want_next_osdmap + !!monc->want_mdsmap;
		ceph_encode_32(&p, num);

		if (monc->want_next_osdmap) {
			dout("__send_subscribe to 'osdmap' %u\n",
			     (unsigned)monc->have_osdmap);
			ceph_encode_string(&p, end, "osdmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_osdmap);
			i->onetime = 1;
			p += sizeof(*i);
			monc->want_next_osdmap = 2;  /* requested */
		}
		if (monc->want_mdsmap) {
			dout("__send_subscribe to 'mdsmap' %u+\n",
			     (unsigned)monc->have_mdsmap);
			ceph_encode_string(&p, end, "mdsmap", 6);
			i = p;
			i->have = cpu_to_le64(monc->have_mdsmap);
			i->onetime = 0;
			p += sizeof(*i);
		}
		ceph_encode_string(&p, end, "monmap", 6);
		i = p;
		i->have = 0;
		i->onetime = 0;
		p += sizeof(*i);

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		ceph_con_revoke(monc->con, msg);
		ceph_con_send(monc->con, ceph_msg_get(msg));

		monc->sub_sent = jiffies | 1;  /* never 0 */
	}
}
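
/*
 * For reference, the subscribe payload built above (reconstructed from the
 * encode calls; see struct ceph_mon_subscribe_item for the exact layout):
 *
 *	u32 num				number of subscription entries
 *	num times: string name ("osdmap", "mdsmap", or "monmap"),
 *		   followed by a ceph_mon_subscribe_item whose "have" field
 *		   carries the map version we already hold and whose
 *		   "onetime" flag asks for a single update only.
 */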

static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		pr_info("mon%d %s session established\n",
			monc->cur_mon,
			ceph_pr_addr(&monc->con->peer_addr.in_addr));
		monc->hunting = false;
	}
	dout("handle_subscribe_ack after %d seconds\n", seconds);
	monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
	monc->sub_sent = 0;
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}

/*
 * Keep track of which maps we have
 */
int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_mdsmap = got;
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_got_mdsmap);

int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
	mutex_lock(&monc->mutex);
	monc->have_osdmap = got;
	monc->want_next_osdmap = 0;
	mutex_unlock(&monc->mutex);
	return 0;
}

/*
 * Register interest in the next osdmap.  want_next_osdmap is a small
 * state machine: 0 = no interest, 1 = want the next map, 2 = the
 * subscribe request has already been sent.
 */
void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
{
	dout("request_next_osdmap have %u\n", monc->have_osdmap);
	mutex_lock(&monc->mutex);
	if (!monc->want_next_osdmap)
		monc->want_next_osdmap = 1;
	if (monc->want_next_osdmap < 2)
		__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * Open a session with a monitor and schedule the periodic keepalive work.
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);

/*
 * The monitor responds with a mount ack to indicate mount success.  The
 * included client ticket allows the client to talk to MDSs and OSDs.
 */
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
}

/*
 * generic requests (e.g., statfs, poolop)
 */
static struct ceph_mon_generic_request *__lookup_generic_req(
	struct ceph_mon_client *monc, u64 tid)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *n = monc->generic_request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mon_generic_request, node);
		if (tid < req->tid)
			n = n->rb_left;
		else if (tid > req->tid)
			n = n->rb_right;
		else
			return req;
	}
	return NULL;
}

static void __insert_generic_request(struct ceph_mon_client *monc,
				     struct ceph_mon_generic_request *new)
{
	struct rb_node **p = &monc->generic_request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mon_generic_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mon_generic_request, node);
		if (new->tid < req->tid)
			p = &(*p)->rb_left;
		else if (new->tid > req->tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, &monc->generic_request_tree);
}

static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}

static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}

static int do_generic_request(struct ceph_mon_client *monc,
			      struct ceph_mon_generic_request *req)
{
	int err;

	/* register request */
	mutex_lock(&monc->mutex);
	req->tid = ++monc->last_tid;
	req->request->hdr.tid = cpu_to_le64(req->tid);
	__insert_generic_request(monc, req);
	monc->num_generic_requests++;
	ceph_con_send(monc->con, ceph_msg_get(req->request));
	mutex_unlock(&monc->mutex);

	err = wait_for_completion_interruptible(&req->completion);

	mutex_lock(&monc->mutex);
	rb_erase(&req->node, &monc->generic_request_tree);
	monc->num_generic_requests--;
	mutex_unlock(&monc->mutex);

	if (!err)
		err = req->result;
	return err;
}

/*
 * statfs
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;
	dout("handle_statfs_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		*(struct ceph_statfs *)req->buf = reply->st;
		req->result = 0;
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete_all(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt generic reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	req->buf_len = sizeof(*buf);
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
				  true);
	if (!req->reply)
		goto out;

	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;

	err = do_generic_request(monc, req);

out:
	kref_put(&req->kref, release_generic_request);
	return err;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
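
/*
 * Example (illustrative sketch only): a caller with an established monitor
 * session fills a struct ceph_statfs synchronously:
 *
 *	struct ceph_statfs st;
 *	int err = ceph_monc_do_statfs(&client->monc, &st);
 *
 *	if (!err)
 *		pr_info("kb %llu kb_avail %llu\n",
 *			le64_to_cpu(st.kb), le64_to_cpu(st.kb_avail));
 */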

/*
 * pool ops
 */
static int get_poolop_reply_buf(const char *src, size_t src_len,
				char *dst, size_t dst_len)
{
	u32 buf_len;

	if (src_len != sizeof(u32) + dst_len)
		return -EINVAL;

	buf_len = le32_to_cpu(*(u32 *)src);
	if (buf_len != dst_len)
		return -EINVAL;

	memcpy(dst, src + sizeof(u32), dst_len);
	return 0;
}
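
/*
 * The helper above expects the reply payload that follows the poolop
 * reply header to look like (reconstructed from the checks in the code):
 *
 *	u32	buf_len		must equal the caller's dst_len
 *	u8	data[buf_len]	copied verbatim into dst
 */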

static void handle_poolop_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	if (msg->front.iov_len < sizeof(*reply))
		goto bad;
	dout("handle_poolop_reply %p tid %llu\n", msg, tid);

	mutex_lock(&monc->mutex);
	req = __lookup_generic_req(monc, tid);
	if (req) {
		if (req->buf_len &&
		    get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
				     msg->front.iov_len - sizeof(*reply),
				     req->buf, req->buf_len) < 0) {
			mutex_unlock(&monc->mutex);
			goto bad;
		}
		req->result = le32_to_cpu(reply->reply_code);
		get_generic_request(req);
	}
	mutex_unlock(&monc->mutex);
	if (req) {
		complete(&req->completion);
		put_generic_request(req);
	}
	return;

bad:
	pr_err("corrupt generic reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous pool op.
 */
int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
			u32 pool, u64 snapid,
			char *buf, int len)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_poolop *h;
	int err;

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	kref_init(&req->kref);
	req->buf = buf;
	req->buf_len = len;
	init_completion(&req->completion);

	err = -ENOMEM;
	req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;
	req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
				  true);
	if (!req->reply)
		goto out;

	/* fill out request */
	req->request->hdr.version = cpu_to_le16(2);
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;
	h->pool = cpu_to_le32(pool);
	h->op = cpu_to_le32(op);
	h->auid = 0;
	h->snapid = cpu_to_le64(snapid);
	h->name_len = 0;

	err = do_generic_request(monc, req);

out:
	kref_put(&req->kref, release_generic_request);
	return err;
}

int ceph_monc_create_snapid(struct ceph_mon_client *monc,
			    u32 pool, u64 *snapid)
{
	return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
				   pool, 0, (char *)snapid, sizeof(*snapid));
}
EXPORT_SYMBOL(ceph_monc_create_snapid);
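
/*
 * Example (illustrative sketch only): allocating an unmanaged snapshot id
 * from pool 0; the new id is written into snapid on success:
 *
 *	u64 snapid;
 *	int err = ceph_monc_create_snapid(&client->monc, 0, &snapid);
 */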

int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
			    u32 pool, u64 snapid)
{
	return ceph_monc_do_poolop(monc, POOL_OP_DELETE_UNMANAGED_SNAP,
				   pool, snapid, 0, 0);
}

/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_con_revoke(monc->con, req->request);
		ceph_con_send(monc->con, ceph_msg_get(req->request));
	}
}

/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		__close_session(monc);
		__open_session(monc);  /* continue hunting */
	} else {
		ceph_con_keepalive(monc->con);

		__validate_auth(monc);

		if (monc->auth->ops->is_authenticated(monc->auth))
			__send_subscribe(monc);
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	monc->have_fsid = false;
	return 0;
}

int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* connection */
	err = -ENOMEM;
	monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
	if (!monc->con)
		goto out_monmap;
	ceph_con_init(monc->client->msgr, monc->con);
	monc->con->private = monc;
	monc->con->ops = &mon_con_ops;

	/* authentication */
	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_con;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_con;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	monc->cur_mon = -1;
	monc->hunting = true;
	monc->sub_renew_after = jiffies;
	monc->sub_sent = 0;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->num_generic_requests = 0;
	monc->last_tid = 0;

	monc->have_mdsmap = 0;
	monc->have_osdmap = 0;
	monc->want_next_osdmap = 1;
	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_con:
	monc->con->ops->put(monc->con);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
EXPORT_SYMBOL(ceph_monc_init);

void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);

	monc->con->private = NULL;
	monc->con->ops->put(monc->con);
	monc->con = NULL;

	mutex_unlock(&monc->mutex);

	ceph_auth_destroy(monc->auth);

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);

static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	if (monc->auth->ops)
		was_auth = monc->auth->ops->is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_max);
	if (ret < 0) {
		monc->client->auth_err = ret;
		wake_up_all(&monc->client->auth_wq);
	} else if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
	} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr->inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);
	}
	mutex_unlock(&monc->mutex);
}

static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
			      monc->m_auth->front_max);
	if (ret <= 0)
		return ret;  /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_POOLOP_REPLY:
		handle_poolop_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_POOLOP_REPLY:
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	}
	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	if (!monc)
		return;

	dout("mon_fault\n");
	mutex_lock(&monc->mutex);
	if (!con->private)
		goto out;

	if (!monc->hunting)
		pr_info("mon%d %s session lost, "
			"hunting for new mon\n", monc->cur_mon,
			ceph_pr_addr(&monc->con->peer_addr.in_addr));

	__close_session(monc);
	if (!monc->hunting) {
		/* start hunting */
		monc->hunting = true;
		__open_session(monc);
	} else {
		/* already hunting, let's wait a bit */
		__schedule_delayed(monc);
	}
out:
	mutex_unlock(&monc->mutex);
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = ceph_con_get,
	.put = ceph_con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};