net/ceph/messenger.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
8 #include <linux/net.h>
9 #include <linux/nsproxy.h>
10 #include <linux/sched/mm.h>
11 #include <linux/slab.h>
12 #include <linux/socket.h>
13 #include <linux/string.h>
14 #ifdef CONFIG_BLOCK
15 #include <linux/bio.h>
16 #endif /* CONFIG_BLOCK */
17 #include <linux/dns_resolver.h>
18 #include <net/tcp.h>
19
20 #include <linux/ceph/ceph_features.h>
21 #include <linux/ceph/libceph.h>
22 #include <linux/ceph/messenger.h>
23 #include <linux/ceph/decode.h>
24 #include <linux/ceph/pagelist.h>
25 #include <linux/export.h>
26
27 /*
28 * Ceph uses the messenger to exchange ceph_msg messages with other
29 * hosts in the system. The messenger provides ordered and reliable
30 * delivery. We tolerate TCP disconnects by reconnecting (with
31 * exponential backoff) in the case of a fault (disconnection, bad
32 * crc, protocol error). Acks allow sent messages to be discarded by
33 * the sender.
34 */
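
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): how a messenger client typically drives a connection.
 * ceph_con_init(), ceph_con_open() and ceph_con_send() are the real
 * entry points in this file; the NULL private data, the ops table and
 * the OSD entity chosen here are assumptions.
 */
static void __maybe_unused example_open_and_send(struct ceph_messenger *msgr,
			struct ceph_connection *con,
			const struct ceph_connection_operations *ops,
			struct ceph_entity_addr *addr)
{
	struct ceph_msg *msg;

	ceph_con_init(con, NULL, ops, msgr);	/* bind con to its messenger */
	ceph_con_open(con, CEPH_ENTITY_TYPE_OSD, 0, addr);

	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, false);
	if (msg)
		ceph_con_send(con, msg);	/* queue msg, wake the worker */
}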
35
36 /*
37 * We track the state of the socket on a given connection using
38 * values defined below. The transition to a new socket state is
39 * handled by a function which verifies we aren't coming from an
40 * unexpected state.
41 *
42 *      --------
43 *      | NEW* |  transient initial state
44 *      --------
45 *          | con_sock_state_init()
46 *          v
47 *      ----------
48 *      | CLOSED |  initialized, but no socket (and no
49 *      ----------  TCP connection)
50 *       ^      \
51 *       |       \ con_sock_state_connecting()
52 *       |        ----------------------
53 *       |                              \
54 *       + con_sock_state_closed()       \
55 *       |+---------------------------    \
56 *       | \                          \    \
57 *       |  -----------                \    \
58 *       |  | CLOSING |  socket event;  \    \
59 *       |  -----------  await close     \   |
60 *       |       ^                        \  |
61 *       |       |                         \ |
62 *       |       + con_sock_state_closing() \ |
63 *       |      / \                         | |
64 *       |     /   ---------------          | |
65 *       |    /                   \         v v
66 *       |   /                    --------------
67 *       |  /    -----------------| CONNECTING |  socket created, TCP
68 *       |  |   /                 --------------  connect initiated
69 *       |  |   | con_sock_state_connected()
70 *       |  |   v
71 *      -------------
72 *      | CONNECTED |  TCP connection established
73 *      -------------
74 *
75 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
76 */
77
78 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
79 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
80 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
81 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
82 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
83
84 /*
85 * connection states
86 */
87 #define CON_STATE_CLOSED 1 /* -> PREOPEN */
88 #define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
89 #define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
90 #define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
91 #define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
92 #define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
93
94 /*
95 * ceph_connection flag bits
96 */
97 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
98 * messages on errors */
99 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
100 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
101 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
102 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
103
104 static bool con_flag_valid(unsigned long con_flag)
105 {
106 switch (con_flag) {
107 case CON_FLAG_LOSSYTX:
108 case CON_FLAG_KEEPALIVE_PENDING:
109 case CON_FLAG_WRITE_PENDING:
110 case CON_FLAG_SOCK_CLOSED:
111 case CON_FLAG_BACKOFF:
112 return true;
113 default:
114 return false;
115 }
116 }
117
118 static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
119 {
120 BUG_ON(!con_flag_valid(con_flag));
121
122 clear_bit(con_flag, &con->flags);
123 }
124
125 static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
126 {
127 BUG_ON(!con_flag_valid(con_flag));
128
129 set_bit(con_flag, &con->flags);
130 }
131
132 static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
133 {
134 BUG_ON(!con_flag_valid(con_flag));
135
136 return test_bit(con_flag, &con->flags);
137 }
138
139 static bool con_flag_test_and_clear(struct ceph_connection *con,
140 unsigned long con_flag)
141 {
142 BUG_ON(!con_flag_valid(con_flag));
143
144 return test_and_clear_bit(con_flag, &con->flags);
145 }
146
147 static bool con_flag_test_and_set(struct ceph_connection *con,
148 unsigned long con_flag)
149 {
150 BUG_ON(!con_flag_valid(con_flag));
151
152 return test_and_set_bit(con_flag, &con->flags);
153 }
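
/*
 * Illustrative pattern (a sketch; con_sock_closed() later in this
 * file is the real consumer): test-and-clear lets the worker react to
 * a socket-closed event exactly once, no matter how many times the
 * work item runs.
 */
static bool __maybe_unused example_sock_closed_event(struct ceph_connection *con)
{
	/* true at most once per CON_FLAG_SOCK_CLOSED set by the callback */
	return con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED);
}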
154
155 /* Slab caches for frequently-allocated structures */
156
157 static struct kmem_cache *ceph_msg_cache;
158 static struct kmem_cache *ceph_msg_data_cache;
159
160 /* static tag bytes (protocol control messages) */
161 static char tag_msg = CEPH_MSGR_TAG_MSG;
162 static char tag_ack = CEPH_MSGR_TAG_ACK;
163 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
164 static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
165
166 #ifdef CONFIG_LOCKDEP
167 static struct lock_class_key socket_class;
168 #endif
169
170 /*
171 * When skipping (ignoring) a block of input we read it into a "skip
172 * buffer," which is this many bytes in size.
173 */
174 #define SKIP_BUF_SIZE 1024
175
176 static void queue_con(struct ceph_connection *con);
177 static void cancel_con(struct ceph_connection *con);
178 static void ceph_con_workfn(struct work_struct *);
179 static void con_fault(struct ceph_connection *con);
180
181 /*
182 * Nicely render a sockaddr as a string. An array of formatted
183 * strings is used to approximate reentrancy.
184 */
185 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
186 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
187 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
188 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
189
190 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
191 static atomic_t addr_str_seq = ATOMIC_INIT(0);
192
193 static struct page *zero_page; /* used in certain error cases */
194
195 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
196 {
197 int i;
198 char *s;
199 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
200 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
201
202 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
203 s = addr_str[i];
204
205 switch (ss->ss_family) {
206 case AF_INET:
207 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
208 ntohs(in4->sin_port));
209 break;
210
211 case AF_INET6:
212 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
213 ntohs(in6->sin6_port));
214 break;
215
216 default:
217 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
218 ss->ss_family);
219 }
220
221 return s;
222 }
223 EXPORT_SYMBOL(ceph_pr_addr);
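
/*
 * Example (illustrative): the returned string lives in the small
 * addr_str ring above, so use it immediately; after ADDR_STR_COUNT
 * further calls the slot is recycled, which is why the comment calls
 * this only an approximation of reentrancy.
 */
static void __maybe_unused example_log_peer(struct ceph_connection *con)
{
	pr_info("peer is %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
}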
224
225 static void encode_my_addr(struct ceph_messenger *msgr)
226 {
227 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
228 ceph_encode_addr(&msgr->my_enc_addr);
229 }
230
231 /*
232 * work queue for all reading and writing to/from the socket.
233 */
234 static struct workqueue_struct *ceph_msgr_wq;
235
236 static int ceph_msgr_slab_init(void)
237 {
238 BUG_ON(ceph_msg_cache);
239 ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
240 if (!ceph_msg_cache)
241 return -ENOMEM;
242
243 BUG_ON(ceph_msg_data_cache);
244 ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
245 if (ceph_msg_data_cache)
246 return 0;
247
248 kmem_cache_destroy(ceph_msg_cache);
249 ceph_msg_cache = NULL;
250
251 return -ENOMEM;
252 }
253
254 static void ceph_msgr_slab_exit(void)
255 {
256 BUG_ON(!ceph_msg_data_cache);
257 kmem_cache_destroy(ceph_msg_data_cache);
258 ceph_msg_data_cache = NULL;
259
260 BUG_ON(!ceph_msg_cache);
261 kmem_cache_destroy(ceph_msg_cache);
262 ceph_msg_cache = NULL;
263 }
264
265 static void _ceph_msgr_exit(void)
266 {
267 if (ceph_msgr_wq) {
268 destroy_workqueue(ceph_msgr_wq);
269 ceph_msgr_wq = NULL;
270 }
271
272 BUG_ON(zero_page == NULL);
273 put_page(zero_page);
274 zero_page = NULL;
275
276 ceph_msgr_slab_exit();
277 }
278
279 int ceph_msgr_init(void)
280 {
281 if (ceph_msgr_slab_init())
282 return -ENOMEM;
283
284 BUG_ON(zero_page != NULL);
285 zero_page = ZERO_PAGE(0);
286 get_page(zero_page);
287
288 /*
289 * The number of active work items is limited by the number of
290 * connections, so leave @max_active at default.
291 */
292 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
293 if (ceph_msgr_wq)
294 return 0;
295
296 pr_err("msgr_init failed to create workqueue\n");
297 _ceph_msgr_exit();
298
299 return -ENOMEM;
300 }
301 EXPORT_SYMBOL(ceph_msgr_init);
302
303 void ceph_msgr_exit(void)
304 {
305 BUG_ON(ceph_msgr_wq == NULL);
306
307 _ceph_msgr_exit();
308 }
309 EXPORT_SYMBOL(ceph_msgr_exit);
310
311 void ceph_msgr_flush(void)
312 {
313 flush_workqueue(ceph_msgr_wq);
314 }
315 EXPORT_SYMBOL(ceph_msgr_flush);
316
317 /* Connection socket state transition functions */
318
319 static void con_sock_state_init(struct ceph_connection *con)
320 {
321 int old_state;
322
323 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
324 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
325 printk("%s: unexpected old state %d\n", __func__, old_state);
326 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
327 CON_SOCK_STATE_CLOSED);
328 }
329
330 static void con_sock_state_connecting(struct ceph_connection *con)
331 {
332 int old_state;
333
334 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
335 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
336 printk("%s: unexpected old state %d\n", __func__, old_state);
337 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
338 CON_SOCK_STATE_CONNECTING);
339 }
340
341 static void con_sock_state_connected(struct ceph_connection *con)
342 {
343 int old_state;
344
345 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
346 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
347 printk("%s: unexpected old state %d\n", __func__, old_state);
348 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
349 CON_SOCK_STATE_CONNECTED);
350 }
351
352 static void con_sock_state_closing(struct ceph_connection *con)
353 {
354 int old_state;
355
356 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
357 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
358 old_state != CON_SOCK_STATE_CONNECTED &&
359 old_state != CON_SOCK_STATE_CLOSING))
360 printk("%s: unexpected old state %d\n", __func__, old_state);
361 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
362 CON_SOCK_STATE_CLOSING);
363 }
364
365 static void con_sock_state_closed(struct ceph_connection *con)
366 {
367 int old_state;
368
369 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
370 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
371 old_state != CON_SOCK_STATE_CLOSING &&
372 old_state != CON_SOCK_STATE_CONNECTING &&
373 old_state != CON_SOCK_STATE_CLOSED))
374 printk("%s: unexpected old state %d\n", __func__, old_state);
375 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
376 CON_SOCK_STATE_CLOSED);
377 }
378
379 /*
380 * socket callback functions
381 */
382
383 /* data available on socket, or listen socket received a connect */
384 static void ceph_sock_data_ready(struct sock *sk)
385 {
386 struct ceph_connection *con = sk->sk_user_data;
387 if (atomic_read(&con->msgr->stopping)) {
388 return;
389 }
390
391 if (sk->sk_state != TCP_CLOSE_WAIT) {
392 dout("%s on %p state = %lu, queueing work\n", __func__,
393 con, con->state);
394 queue_con(con);
395 }
396 }
397
398 /* socket has buffer space for writing */
399 static void ceph_sock_write_space(struct sock *sk)
400 {
401 struct ceph_connection *con = sk->sk_user_data;
402
403 /* only queue to workqueue if there is data we want to write,
404 * and there is sufficient space in the socket buffer to accept
405 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
406 * doesn't get called again until try_write() fills the socket
407 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
408 * and net/core/stream.c:sk_stream_write_space().
409 */
410 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
411 if (sk_stream_is_writeable(sk)) {
412 dout("%s %p queueing write work\n", __func__, con);
413 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
414 queue_con(con);
415 }
416 } else {
417 dout("%s %p nothing to write\n", __func__, con);
418 }
419 }
420
421 /* socket's state has changed */
422 static void ceph_sock_state_change(struct sock *sk)
423 {
424 struct ceph_connection *con = sk->sk_user_data;
425
426 dout("%s %p state = %lu sk_state = %u\n", __func__,
427 con, con->state, sk->sk_state);
428
429 switch (sk->sk_state) {
430 case TCP_CLOSE:
431 dout("%s TCP_CLOSE\n", __func__);	/* fall through */
432 case TCP_CLOSE_WAIT:
433 dout("%s TCP_CLOSE_WAIT\n", __func__);
434 con_sock_state_closing(con);
435 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
436 queue_con(con);
437 break;
438 case TCP_ESTABLISHED:
439 dout("%s TCP_ESTABLISHED\n", __func__);
440 con_sock_state_connected(con);
441 queue_con(con);
442 break;
443 default: /* Everything else is uninteresting */
444 break;
445 }
446 }
447
448 /*
449 * set up socket callbacks
450 */
451 static void set_sock_callbacks(struct socket *sock,
452 struct ceph_connection *con)
453 {
454 struct sock *sk = sock->sk;
455 sk->sk_user_data = con;
456 sk->sk_data_ready = ceph_sock_data_ready;
457 sk->sk_write_space = ceph_sock_write_space;
458 sk->sk_state_change = ceph_sock_state_change;
459 }
460
461
462 /*
463 * socket helpers
464 */
465
466 /*
467 * initiate connection to a remote socket.
468 */
469 static int ceph_tcp_connect(struct ceph_connection *con)
470 {
471 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
472 struct socket *sock;
473 unsigned int noio_flag;
474 int ret;
475
476 BUG_ON(con->sock);
477
478 /* sock_create_kern() allocates with GFP_KERNEL */
479 noio_flag = memalloc_noio_save();
480 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
481 SOCK_STREAM, IPPROTO_TCP, &sock);
482 memalloc_noio_restore(noio_flag);
483 if (ret)
484 return ret;
485 sock->sk->sk_allocation = GFP_NOFS;
486
487 #ifdef CONFIG_LOCKDEP
488 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
489 #endif
490
491 set_sock_callbacks(sock, con);
492
493 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
494
495 con_sock_state_connecting(con);
496 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
497 O_NONBLOCK);
498 if (ret == -EINPROGRESS) {
499 dout("connect %s EINPROGRESS sk_state = %u\n",
500 ceph_pr_addr(&con->peer_addr.in_addr),
501 sock->sk->sk_state);
502 } else if (ret < 0) {
503 pr_err("connect %s error %d\n",
504 ceph_pr_addr(&con->peer_addr.in_addr), ret);
505 sock_release(sock);
506 return ret;
507 }
508
509 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
510 int optval = 1;
511
512 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
513 (char *)&optval, sizeof(optval));
514 if (ret)
515 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
516 ret);
517 }
518
519 con->sock = sock;
520 return 0;
521 }
522
523 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
524 {
525 struct kvec iov = {buf, len};
526 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
527 int r;
528
529 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
530 r = sock_recvmsg(sock, &msg, msg.msg_flags);
531 if (r == -EAGAIN)
532 r = 0;
533 return r;
534 }
535
536 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
537 int page_offset, size_t length)
538 {
539 struct bio_vec bvec = {
540 .bv_page = page,
541 .bv_offset = page_offset,
542 .bv_len = length
543 };
544 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
545 int r;
546
547 BUG_ON(page_offset + length > PAGE_SIZE);
548 iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
549 r = sock_recvmsg(sock, &msg, msg.msg_flags);
550 if (r == -EAGAIN)
551 r = 0;
552 return r;
553 }
554
555 /*
556 * write something.  @more is true if the caller will be sending more data
557 * shortly.
558 */
559 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
560 size_t kvlen, size_t len, int more)
561 {
562 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
563 int r;
564
565 if (more)
566 msg.msg_flags |= MSG_MORE;
567 else
568 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
569
570 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
571 if (r == -EAGAIN)
572 r = 0;
573 return r;
574 }
575
576 static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
577 int offset, size_t size, bool more)
578 {
579 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
580 int ret;
581
582 ret = kernel_sendpage(sock, page, offset, size, flags);
583 if (ret == -EAGAIN)
584 ret = 0;
585
586 return ret;
587 }
588
589 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
590 int offset, size_t size, bool more)
591 {
592 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
593 struct bio_vec bvec;
594 int ret;
595
596 /* sendpage cannot properly handle pages with page_count == 0;
597 * we need to fall back to sendmsg if that's the case */
598 if (page_count(page) >= 1)
599 return __ceph_tcp_sendpage(sock, page, offset, size, more);
600
601 bvec.bv_page = page;
602 bvec.bv_offset = offset;
603 bvec.bv_len = size;
604
605 if (more)
606 msg.msg_flags |= MSG_MORE;
607 else
608 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
609
610 iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
611 ret = sock_sendmsg(sock, &msg);
612 if (ret == -EAGAIN)
613 ret = 0;
614
615 return ret;
616 }
617
618 /*
619 * Shutdown/close the socket for the given connection.
620 */
621 static int con_close_socket(struct ceph_connection *con)
622 {
623 int rc = 0;
624
625 dout("con_close_socket on %p sock %p\n", con, con->sock);
626 if (con->sock) {
627 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
628 sock_release(con->sock);
629 con->sock = NULL;
630 }
631
632 /*
633 * Forcibly clear the SOCK_CLOSED flag. It gets set
634 * independent of the connection mutex, and we could have
635 * received a socket close event before we had the chance to
636 * shut the socket down.
637 */
638 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
639
640 con_sock_state_closed(con);
641 return rc;
642 }
643
644 /*
645 * Reset a connection. Discard all incoming and outgoing messages
646 * and clear *_seq state.
647 */
648 static void ceph_msg_remove(struct ceph_msg *msg)
649 {
650 list_del_init(&msg->list_head);
651
652 ceph_msg_put(msg);
653 }
654 static void ceph_msg_remove_list(struct list_head *head)
655 {
656 while (!list_empty(head)) {
657 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
658 list_head);
659 ceph_msg_remove(msg);
660 }
661 }
662
663 static void reset_connection(struct ceph_connection *con)
664 {
665 /* reset connection, out_queue, msg_ and connect_seq */
666 /* discard existing out_queue and msg_seq */
667 dout("reset_connection %p\n", con);
668 ceph_msg_remove_list(&con->out_queue);
669 ceph_msg_remove_list(&con->out_sent);
670
671 if (con->in_msg) {
672 BUG_ON(con->in_msg->con != con);
673 ceph_msg_put(con->in_msg);
674 con->in_msg = NULL;
675 }
676
677 con->connect_seq = 0;
678 con->out_seq = 0;
679 if (con->out_msg) {
680 BUG_ON(con->out_msg->con != con);
681 ceph_msg_put(con->out_msg);
682 con->out_msg = NULL;
683 }
684 con->in_seq = 0;
685 con->in_seq_acked = 0;
686
687 con->out_skip = 0;
688 }
689
690 /*
691 * mark a peer down. drop any open connections.
692 */
693 void ceph_con_close(struct ceph_connection *con)
694 {
695 mutex_lock(&con->mutex);
696 dout("con_close %p peer %s\n", con,
697 ceph_pr_addr(&con->peer_addr.in_addr));
698 con->state = CON_STATE_CLOSED;
699
700 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
701 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
702 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
703 con_flag_clear(con, CON_FLAG_BACKOFF);
704
705 reset_connection(con);
706 con->peer_global_seq = 0;
707 cancel_con(con);
708 con_close_socket(con);
709 mutex_unlock(&con->mutex);
710 }
711 EXPORT_SYMBOL(ceph_con_close);
712
713 /*
714 * Reopen a closed connection, with a new peer address.
715 */
716 void ceph_con_open(struct ceph_connection *con,
717 __u8 entity_type, __u64 entity_num,
718 struct ceph_entity_addr *addr)
719 {
720 mutex_lock(&con->mutex);
721 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
722
723 WARN_ON(con->state != CON_STATE_CLOSED);
724 con->state = CON_STATE_PREOPEN;
725
726 con->peer_name.type = (__u8) entity_type;
727 con->peer_name.num = cpu_to_le64(entity_num);
728
729 memcpy(&con->peer_addr, addr, sizeof(*addr));
730 con->delay = 0; /* reset backoff memory */
731 mutex_unlock(&con->mutex);
732 queue_con(con);
733 }
734 EXPORT_SYMBOL(ceph_con_open);
735
736 /*
737 * return true if this connection ever successfully opened
738 */
739 bool ceph_con_opened(struct ceph_connection *con)
740 {
741 return con->connect_seq > 0;
742 }
743
744 /*
745 * initialize a new connection.
746 */
747 void ceph_con_init(struct ceph_connection *con, void *private,
748 const struct ceph_connection_operations *ops,
749 struct ceph_messenger *msgr)
750 {
751 dout("con_init %p\n", con);
752 memset(con, 0, sizeof(*con));
753 con->private = private;
754 con->ops = ops;
755 con->msgr = msgr;
756
757 con_sock_state_init(con);
758
759 mutex_init(&con->mutex);
760 INIT_LIST_HEAD(&con->out_queue);
761 INIT_LIST_HEAD(&con->out_sent);
762 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
763
764 con->state = CON_STATE_CLOSED;
765 }
766 EXPORT_SYMBOL(ceph_con_init);
767
768
769 /*
770 * We maintain a global counter to order connection attempts. Get
771 * a unique seq greater than @gt.
772 */
773 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
774 {
775 u32 ret;
776
777 spin_lock(&msgr->global_seq_lock);
778 if (msgr->global_seq < gt)
779 msgr->global_seq = gt;
780 ret = ++msgr->global_seq;
781 spin_unlock(&msgr->global_seq_lock);
782 return ret;
783 }
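
/*
 * Worked example (illustrative): if the peer reports global_seq 42,
 * get_global_seq(msgr, 42) bumps our counter past it, so the value
 * used for the next connect attempt is strictly greater.
 */
static u32 __maybe_unused example_bump_global_seq(struct ceph_messenger *msgr)
{
	u32 seq = get_global_seq(msgr, 42);	/* 42 came from the peer */

	WARN_ON(seq <= 42);	/* result is always > @gt */
	return seq;
}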
784
785 static void con_out_kvec_reset(struct ceph_connection *con)
786 {
787 BUG_ON(con->out_skip);
788
789 con->out_kvec_left = 0;
790 con->out_kvec_bytes = 0;
791 con->out_kvec_cur = &con->out_kvec[0];
792 }
793
794 static void con_out_kvec_add(struct ceph_connection *con,
795 size_t size, void *data)
796 {
797 int index = con->out_kvec_left;
798
799 BUG_ON(con->out_skip);
800 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
801
802 con->out_kvec[index].iov_len = size;
803 con->out_kvec[index].iov_base = data;
804 con->out_kvec_left++;
805 con->out_kvec_bytes += size;
806 }
807
808 /*
809 * Chop off a kvec from the end. Return residual number of bytes for
810 * that kvec, i.e. how many bytes would have been written if the kvec
811 * hadn't been nuked.
812 */
813 static int con_out_kvec_skip(struct ceph_connection *con)
814 {
815 int off = con->out_kvec_cur - con->out_kvec;
816 int skip = 0;
817
818 if (con->out_kvec_bytes > 0) {
819 skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
820 BUG_ON(con->out_kvec_bytes < skip);
821 BUG_ON(!con->out_kvec_left);
822 con->out_kvec_bytes -= skip;
823 con->out_kvec_left--;
824 }
825
826 return skip;
827 }
828
829 #ifdef CONFIG_BLOCK
830
831 /*
832 * For a bio data item, a piece is whatever remains of the next
833 * entry in the current bio iovec, or the first entry in the next
834 * bio in the list.
835 */
836 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
837 size_t length)
838 {
839 struct ceph_msg_data *data = cursor->data;
840 struct bio *bio;
841
842 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
843
844 bio = data->bio;
845 BUG_ON(!bio);
846
847 cursor->resid = min(length, data->bio_length);
848 cursor->bio = bio;
849 cursor->bvec_iter = bio->bi_iter;
850 cursor->last_piece =
851 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
852 }
853
854 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
855 size_t *page_offset,
856 size_t *length)
857 {
858 struct ceph_msg_data *data = cursor->data;
859 struct bio *bio;
860 struct bio_vec bio_vec;
861
862 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
863
864 bio = cursor->bio;
865 BUG_ON(!bio);
866
867 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
868
869 *page_offset = (size_t) bio_vec.bv_offset;
870 BUG_ON(*page_offset >= PAGE_SIZE);
871 if (cursor->last_piece) /* pagelist offset is always 0 */
872 *length = cursor->resid;
873 else
874 *length = (size_t) bio_vec.bv_len;
875 BUG_ON(*length > cursor->resid);
876 BUG_ON(*page_offset + *length > PAGE_SIZE);
877
878 return bio_vec.bv_page;
879 }
880
881 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
882 size_t bytes)
883 {
884 struct bio *bio;
885 struct bio_vec bio_vec;
886
887 BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
888
889 bio = cursor->bio;
890 BUG_ON(!bio);
891
892 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
893
894 /* Advance the cursor offset */
895
896 BUG_ON(cursor->resid < bytes);
897 cursor->resid -= bytes;
898
899 bio_advance_iter(bio, &cursor->bvec_iter, bytes);
900
901 if (bytes < bio_vec.bv_len)
902 return false; /* more bytes to process in this segment */
903
904 /* Move on to the next segment, and possibly the next bio */
905
906 if (!cursor->bvec_iter.bi_size) {
907 bio = bio->bi_next;
908 cursor->bio = bio;
909 if (bio)
910 cursor->bvec_iter = bio->bi_iter;
911 else
912 memset(&cursor->bvec_iter, 0,
913 sizeof(cursor->bvec_iter));
914 }
915
916 if (!cursor->last_piece) {
917 BUG_ON(!cursor->resid);
918 BUG_ON(!bio);
919 /* A short read is OK, so use <= rather than == */
920 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
921 cursor->last_piece = true;
922 }
923
924 return true;
925 }
926 #endif /* CONFIG_BLOCK */
927
928 /*
929 * For a page array, a piece comes from the first page in the array
930 * that has not already been fully consumed.
931 */
932 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
933 size_t length)
934 {
935 struct ceph_msg_data *data = cursor->data;
936 int page_count;
937
938 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
939
940 BUG_ON(!data->pages);
941 BUG_ON(!data->length);
942
943 cursor->resid = min(length, data->length);
944 page_count = calc_pages_for(data->alignment, (u64)data->length);
945 cursor->page_offset = data->alignment & ~PAGE_MASK;
946 cursor->page_index = 0;
947 BUG_ON(page_count > (int)USHRT_MAX);
948 cursor->page_count = (unsigned short)page_count;
949 BUG_ON(length > SIZE_MAX - cursor->page_offset);
950 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
951 }
952
953 static struct page *
954 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
955 size_t *page_offset, size_t *length)
956 {
957 struct ceph_msg_data *data = cursor->data;
958
959 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
960
961 BUG_ON(cursor->page_index >= cursor->page_count);
962 BUG_ON(cursor->page_offset >= PAGE_SIZE);
963
964 *page_offset = cursor->page_offset;
965 if (cursor->last_piece)
966 *length = cursor->resid;
967 else
968 *length = PAGE_SIZE - *page_offset;
969
970 return data->pages[cursor->page_index];
971 }
972
973 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
974 size_t bytes)
975 {
976 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
977
978 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
979
980 /* Advance the cursor page offset */
981
982 cursor->resid -= bytes;
983 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
984 if (!bytes || cursor->page_offset)
985 return false; /* more bytes to process in the current page */
986
987 if (!cursor->resid)
988 return false; /* no more data */
989
990 /* Move on to the next page; offset is already at 0 */
991
992 BUG_ON(cursor->page_index >= cursor->page_count);
993 cursor->page_index++;
994 cursor->last_piece = cursor->resid <= PAGE_SIZE;
995
996 return true;
997 }
998
999 /*
1000 * For a pagelist, a piece is whatever remains to be consumed in the
1001 * first page in the list, or the front of the next page.
1002 */
1003 static void
1004 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
1005 size_t length)
1006 {
1007 struct ceph_msg_data *data = cursor->data;
1008 struct ceph_pagelist *pagelist;
1009 struct page *page;
1010
1011 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1012
1013 pagelist = data->pagelist;
1014 BUG_ON(!pagelist);
1015
1016 if (!length)
1017 return; /* pagelist can be assigned but empty */
1018
1019 BUG_ON(list_empty(&pagelist->head));
1020 page = list_first_entry(&pagelist->head, struct page, lru);
1021
1022 cursor->resid = min(length, pagelist->length);
1023 cursor->page = page;
1024 cursor->offset = 0;
1025 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1026 }
1027
1028 static struct page *
1029 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
1030 size_t *page_offset, size_t *length)
1031 {
1032 struct ceph_msg_data *data = cursor->data;
1033 struct ceph_pagelist *pagelist;
1034
1035 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1036
1037 pagelist = data->pagelist;
1038 BUG_ON(!pagelist);
1039
1040 BUG_ON(!cursor->page);
1041 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1042
1043 /* offset of first page in pagelist is always 0 */
1044 *page_offset = cursor->offset & ~PAGE_MASK;
1045 if (cursor->last_piece)
1046 *length = cursor->resid;
1047 else
1048 *length = PAGE_SIZE - *page_offset;
1049
1050 return cursor->page;
1051 }
1052
1053 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
1054 size_t bytes)
1055 {
1056 struct ceph_msg_data *data = cursor->data;
1057 struct ceph_pagelist *pagelist;
1058
1059 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1060
1061 pagelist = data->pagelist;
1062 BUG_ON(!pagelist);
1063
1064 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1065 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
1066
1067 /* Advance the cursor offset */
1068
1069 cursor->resid -= bytes;
1070 cursor->offset += bytes;
1071 /* offset of first page in pagelist is always 0 */
1072 if (!bytes || cursor->offset & ~PAGE_MASK)
1073 return false; /* more bytes to process in the current page */
1074
1075 if (!cursor->resid)
1076 return false; /* no more data */
1077
1078 /* Move on to the next page */
1079
1080 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
1081 cursor->page = list_next_entry(cursor->page, lru);
1082 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1083
1084 return true;
1085 }
1086
1087 /*
1088 * Message data is handled (sent or received) in pieces, where each
1089 * piece resides on a single page. The network layer might not
1090 * consume an entire piece at once. A data item's cursor keeps
1091 * track of which piece is next to process and how much remains to
1092 * be processed in that piece. It also tracks whether the current
1093 * piece is the last one in the data item.
1094 */
1095 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1096 {
1097 size_t length = cursor->total_resid;
1098
1099 switch (cursor->data->type) {
1100 case CEPH_MSG_DATA_PAGELIST:
1101 ceph_msg_data_pagelist_cursor_init(cursor, length);
1102 break;
1103 case CEPH_MSG_DATA_PAGES:
1104 ceph_msg_data_pages_cursor_init(cursor, length);
1105 break;
1106 #ifdef CONFIG_BLOCK
1107 case CEPH_MSG_DATA_BIO:
1108 ceph_msg_data_bio_cursor_init(cursor, length);
1109 break;
1110 #endif /* CONFIG_BLOCK */
1111 case CEPH_MSG_DATA_NONE:
1112 default:
1113 /* BUG(); */
1114 break;
1115 }
1116 cursor->need_crc = true;
1117 }
1118
1119 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
1120 {
1121 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1122 struct ceph_msg_data *data;
1123
1124 BUG_ON(!length);
1125 BUG_ON(length > msg->data_length);
1126 BUG_ON(list_empty(&msg->data));
1127
1128 cursor->data_head = &msg->data;
1129 cursor->total_resid = length;
1130 data = list_first_entry(&msg->data, struct ceph_msg_data, links);
1131 cursor->data = data;
1132
1133 __ceph_msg_data_cursor_init(cursor);
1134 }
1135
1136 /*
1137 * Return the page containing the next piece to process for a given
1138 * data item, and supply the page offset and length of that piece.
1139 * Indicate whether this is the last piece in this data item.
1140 */
1141 static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1142 size_t *page_offset, size_t *length,
1143 bool *last_piece)
1144 {
1145 struct page *page;
1146
1147 switch (cursor->data->type) {
1148 case CEPH_MSG_DATA_PAGELIST:
1149 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1150 break;
1151 case CEPH_MSG_DATA_PAGES:
1152 page = ceph_msg_data_pages_next(cursor, page_offset, length);
1153 break;
1154 #ifdef CONFIG_BLOCK
1155 case CEPH_MSG_DATA_BIO:
1156 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1157 break;
1158 #endif /* CONFIG_BLOCK */
1159 case CEPH_MSG_DATA_NONE:
1160 default:
1161 page = NULL;
1162 break;
1163 }
1164 BUG_ON(!page);
1165 BUG_ON(*page_offset + *length > PAGE_SIZE);
1166 BUG_ON(!*length);
1167 if (last_piece)
1168 *last_piece = cursor->last_piece;
1169
1170 return page;
1171 }
1172
1173 /*
1174 * Advance the cursor.  Sets cursor->need_crc when the result moves the
1175 * cursor on to the next piece of the data item.
1176 */
1177 static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1178 size_t bytes)
1179 {
1180 bool new_piece;
1181
1182 BUG_ON(bytes > cursor->resid);
1183 switch (cursor->data->type) {
1184 case CEPH_MSG_DATA_PAGELIST:
1185 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1186 break;
1187 case CEPH_MSG_DATA_PAGES:
1188 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1189 break;
1190 #ifdef CONFIG_BLOCK
1191 case CEPH_MSG_DATA_BIO:
1192 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1193 break;
1194 #endif /* CONFIG_BLOCK */
1195 case CEPH_MSG_DATA_NONE:
1196 default:
1197 BUG();
1198 break;
1199 }
1200 cursor->total_resid -= bytes;
1201
1202 if (!cursor->resid && cursor->total_resid) {
1203 WARN_ON(!cursor->last_piece);
1204 BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
1205 cursor->data = list_next_entry(cursor->data, links);
1206 __ceph_msg_data_cursor_init(cursor);
1207 new_piece = true;
1208 }
1209 cursor->need_crc = new_piece;
1210 }
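
/*
 * Illustrative consumer loop (a sketch; write_partial_message_data()
 * below is the real one and additionally maintains the data CRC):
 * walk a message's data pieces with the cursor, tolerating partial
 * progress from the network layer.  Assumes @msg carries a payload.
 */
static int __maybe_unused example_send_msg_data(struct ceph_connection *con,
						struct ceph_msg *msg)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;

	ceph_msg_data_cursor_init(msg, msg->data_length);
	while (cursor->resid) {
		struct page *page;
		size_t page_offset, length;
		bool last_piece;
		int ret;

		page = ceph_msg_data_next(cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, !last_piece);
		if (ret <= 0)
			return ret;	/* 0: socket full, <0: error */

		ceph_msg_data_advance(cursor, (size_t)ret);
	}

	return 1;	/* all pieces written */
}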
1211
1212 static size_t sizeof_footer(struct ceph_connection *con)
1213 {
1214 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
1215 sizeof(struct ceph_msg_footer) :
1216 sizeof(struct ceph_msg_footer_old);
1217 }
1218
1219 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1220 {
1221 BUG_ON(!msg);
1222 BUG_ON(!data_len);
1223
1224 /* Initialize data cursor */
1225
1226 ceph_msg_data_cursor_init(msg, (size_t)data_len);
1227 }
1228
1229 /*
1230 * Prepare footer for currently outgoing message, and finish things
1231 * off.  Assumes out_kvec* are already valid; we just add on to the end.
1232 */
1233 static void prepare_write_message_footer(struct ceph_connection *con)
1234 {
1235 struct ceph_msg *m = con->out_msg;
1236
1237 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1238
1239 dout("prepare_write_message_footer %p\n", con);
1240 con_out_kvec_add(con, sizeof_footer(con), &m->footer);
1241 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
1242 if (con->ops->sign_message)
1243 con->ops->sign_message(m);
1244 else
1245 m->footer.sig = 0;
1246 } else {
1247 m->old_footer.flags = m->footer.flags;
1248 }
1249 con->out_more = m->more_to_follow;
1250 con->out_msg_done = true;
1251 }
1252
1253 /*
1254 * Prepare headers for the next outgoing message.
1255 */
1256 static void prepare_write_message(struct ceph_connection *con)
1257 {
1258 struct ceph_msg *m;
1259 u32 crc;
1260
1261 con_out_kvec_reset(con);
1262 con->out_msg_done = false;
1263
1264 /* Sneak an ack in there first? If we can get it into the same
1265 * TCP packet, that's a good thing. */
1266 if (con->in_seq > con->in_seq_acked) {
1267 con->in_seq_acked = con->in_seq;
1268 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1269 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1270 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1271 &con->out_temp_ack);
1272 }
1273
1274 BUG_ON(list_empty(&con->out_queue));
1275 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
1276 con->out_msg = m;
1277 BUG_ON(m->con != con);
1278
1279 /* put message on sent list */
1280 ceph_msg_get(m);
1281 list_move_tail(&m->list_head, &con->out_sent);
1282
1283 /*
1284 * only assign outgoing seq # if we haven't sent this message
1285 * yet.  If it is requeued, resend it with its original seq.
1286 */
1287 if (m->needs_out_seq) {
1288 m->hdr.seq = cpu_to_le64(++con->out_seq);
1289 m->needs_out_seq = false;
1290 }
1291
1292 if (con->ops->reencode_message)
1293 con->ops->reencode_message(m);
1294
1295 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
1296 m, con->out_seq, le16_to_cpu(m->hdr.type),
1297 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
1298 m->data_length);
1299 WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
1300 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
1301
1302 /* tag + hdr + front + middle */
1303 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
1304 con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
1305 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
1306
1307 if (m->middle)
1308 con_out_kvec_add(con, m->middle->vec.iov_len,
1309 m->middle->vec.iov_base);
1310
1311 /* fill in hdr crc and finalize hdr */
1312 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1313 con->out_msg->hdr.crc = cpu_to_le32(crc);
1314 memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
1315
1316 /* fill in front and middle crc, footer */
1317 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1318 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1319 if (m->middle) {
1320 crc = crc32c(0, m->middle->vec.iov_base,
1321 m->middle->vec.iov_len);
1322 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1323 } else
1324 con->out_msg->footer.middle_crc = 0;
1325 dout("%s front_crc %u middle_crc %u\n", __func__,
1326 le32_to_cpu(con->out_msg->footer.front_crc),
1327 le32_to_cpu(con->out_msg->footer.middle_crc));
1328 con->out_msg->footer.flags = 0;
1329
1330 /* is there a data payload? */
1331 con->out_msg->footer.data_crc = 0;
1332 if (m->data_length) {
1333 prepare_message_data(con->out_msg, m->data_length);
1334 con->out_more = 1; /* data + footer will follow */
1335 } else {
1336 /* no, queue up footer too and be done */
1337 prepare_write_message_footer(con);
1338 }
1339
1340 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1341 }
1342
1343 /*
1344 * Prepare an ack.
1345 */
1346 static void prepare_write_ack(struct ceph_connection *con)
1347 {
1348 dout("prepare_write_ack %p %llu -> %llu\n", con,
1349 con->in_seq_acked, con->in_seq);
1350 con->in_seq_acked = con->in_seq;
1351
1352 con_out_kvec_reset(con);
1353
1354 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1355
1356 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1357 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1358 &con->out_temp_ack);
1359
1360 con->out_more = 1; /* more will follow.. eventually.. */
1361 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1362 }
1363
1364 /*
1365 * Prepare to share the seq during handshake
1366 */
1367 static void prepare_write_seq(struct ceph_connection *con)
1368 {
1369 dout("prepare_write_seq %p %llu -> %llu\n", con,
1370 con->in_seq_acked, con->in_seq);
1371 con->in_seq_acked = con->in_seq;
1372
1373 con_out_kvec_reset(con);
1374
1375 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1376 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1377 &con->out_temp_ack);
1378
1379 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1380 }
1381
1382 /*
1383 * Prepare to write keepalive byte.
1384 */
1385 static void prepare_write_keepalive(struct ceph_connection *con)
1386 {
1387 dout("prepare_write_keepalive %p\n", con);
1388 con_out_kvec_reset(con);
1389 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
1390 struct timespec now;
1391
1392 ktime_get_real_ts(&now);
1393 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
1394 ceph_encode_timespec(&con->out_temp_keepalive2, &now);
1395 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
1396 &con->out_temp_keepalive2);
1397 } else {
1398 con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
1399 }
1400 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1401 }
1402
1403 /*
1404 * Connection negotiation.
1405 */
1406
1407 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1408 int *auth_proto)
1409 {
1410 struct ceph_auth_handshake *auth;
1411
1412 if (!con->ops->get_authorizer) {
1413 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1414 con->out_connect.authorizer_len = 0;
1415 return NULL;
1416 }
1417
1418 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
1419 if (IS_ERR(auth))
1420 return auth;
1421
1422 con->auth_reply_buf = auth->authorizer_reply_buf;
1423 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
1424 return auth;
1425 }
1426
1427 /*
1428 * We connected to a peer and are saying hello.
1429 */
1430 static void prepare_write_banner(struct ceph_connection *con)
1431 {
1432 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1433 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
1434 &con->msgr->my_enc_addr);
1435
1436 con->out_more = 0;
1437 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1438 }
1439
1440 static int prepare_write_connect(struct ceph_connection *con)
1441 {
1442 unsigned int global_seq = get_global_seq(con->msgr, 0);
1443 int proto;
1444 int auth_proto;
1445 struct ceph_auth_handshake *auth;
1446
1447 switch (con->peer_name.type) {
1448 case CEPH_ENTITY_TYPE_MON:
1449 proto = CEPH_MONC_PROTOCOL;
1450 break;
1451 case CEPH_ENTITY_TYPE_OSD:
1452 proto = CEPH_OSDC_PROTOCOL;
1453 break;
1454 case CEPH_ENTITY_TYPE_MDS:
1455 proto = CEPH_MDSC_PROTOCOL;
1456 break;
1457 default:
1458 BUG();
1459 }
1460
1461 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1462 con->connect_seq, global_seq, proto);
1463
1464 con->out_connect.features =
1465 cpu_to_le64(from_msgr(con->msgr)->supported_features);
1466 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1467 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1468 con->out_connect.global_seq = cpu_to_le32(global_seq);
1469 con->out_connect.protocol_version = cpu_to_le32(proto);
1470 con->out_connect.flags = 0;
1471
1472 auth_proto = CEPH_AUTH_UNKNOWN;
1473 auth = get_connect_authorizer(con, &auth_proto);
1474 if (IS_ERR(auth))
1475 return PTR_ERR(auth);
1476
1477 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
1478 con->out_connect.authorizer_len = auth ?
1479 cpu_to_le32(auth->authorizer_buf_len) : 0;
1480
1481 con_out_kvec_add(con, sizeof (con->out_connect),
1482 &con->out_connect);
1483 if (auth && auth->authorizer_buf_len)
1484 con_out_kvec_add(con, auth->authorizer_buf_len,
1485 auth->authorizer_buf);
1486
1487 con->out_more = 0;
1488 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1489
1490 return 0;
1491 }
1492
1493 /*
1494 * write as much of the pending kvecs to the socket as we can.
1495 * 1 -> done
1496 * 0 -> socket full, but more to do
1497 * <0 -> error
1498 */
1499 static int write_partial_kvec(struct ceph_connection *con)
1500 {
1501 int ret;
1502
1503 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1504 while (con->out_kvec_bytes > 0) {
1505 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1506 con->out_kvec_left, con->out_kvec_bytes,
1507 con->out_more);
1508 if (ret <= 0)
1509 goto out;
1510 con->out_kvec_bytes -= ret;
1511 if (con->out_kvec_bytes == 0)
1512 break; /* done */
1513
1514 /* account for full iov entries consumed */
1515 while (ret >= con->out_kvec_cur->iov_len) {
1516 BUG_ON(!con->out_kvec_left);
1517 ret -= con->out_kvec_cur->iov_len;
1518 con->out_kvec_cur++;
1519 con->out_kvec_left--;
1520 }
1521 /* and for a partially-consumed entry */
1522 if (ret) {
1523 con->out_kvec_cur->iov_len -= ret;
1524 con->out_kvec_cur->iov_base += ret;
1525 }
1526 }
1527 con->out_kvec_left = 0;
1528 ret = 1;
1529 out:
1530 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1531 con->out_kvec_bytes, con->out_kvec_left, ret);
1532 return ret; /* done! */
1533 }
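
/*
 * Illustrative caller (a sketch of the pattern try_write() applies
 * later in this file): the 1/0/<0 convention lets the worker tell
 * "done" from "would block" from "fault".
 */
static int __maybe_unused example_flush_kvecs(struct ceph_connection *con)
{
	int ret = write_partial_kvec(con);

	if (ret == 0)
		return 0;	/* socket full; wait for write_space */
	if (ret < 0)
		return ret;	/* error; caller faults the connection */
	return 1;		/* every queued kvec hit the socket */
}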
1534
1535 static u32 ceph_crc32c_page(u32 crc, struct page *page,
1536 unsigned int page_offset,
1537 unsigned int length)
1538 {
1539 char *kaddr;
1540
1541 kaddr = kmap(page);
1542 BUG_ON(kaddr == NULL);
1543 crc = crc32c(crc, kaddr + page_offset, length);
1544 kunmap(page);
1545
1546 return crc;
1547 }
1548 /*
1549 * Write as much message data payload as we can. If we finish, queue
1550 * up the footer.
1551 * 1 -> done, footer is now queued in out_kvec[].
1552 * 0 -> socket full, but more to do
1553 * <0 -> error
1554 */
1555 static int write_partial_message_data(struct ceph_connection *con)
1556 {
1557 struct ceph_msg *msg = con->out_msg;
1558 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1559 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
1560 u32 crc;
1561
1562 dout("%s %p msg %p\n", __func__, con, msg);
1563
1564 if (list_empty(&msg->data))
1565 return -EINVAL;
1566
1567 /*
1568 * Iterate through each page that contains data to be
1569 * written, and send as much as possible for each.
1570 *
1571 * If we are calculating the data crc (the default), we will
1572 * need to map the page. If we have no pages, they have
1573 * been revoked, so use the zero page.
1574 */
1575 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
1576 while (cursor->resid) {
1577 struct page *page;
1578 size_t page_offset;
1579 size_t length;
1580 bool last_piece;
1581 int ret;
1582
1583 page = ceph_msg_data_next(cursor, &page_offset, &length,
1584 &last_piece);
1585 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1586 length, !last_piece);
1587 if (ret <= 0) {
1588 if (do_datacrc)
1589 msg->footer.data_crc = cpu_to_le32(crc);
1590
1591 return ret;
1592 }
1593 if (do_datacrc && cursor->need_crc)
1594 crc = ceph_crc32c_page(crc, page, page_offset, length);
1595 ceph_msg_data_advance(cursor, (size_t)ret);
1596 }
1597
1598 dout("%s %p msg %p done\n", __func__, con, msg);
1599
1600 /* prepare and queue up footer, too */
1601 if (do_datacrc)
1602 msg->footer.data_crc = cpu_to_le32(crc);
1603 else
1604 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1605 con_out_kvec_reset(con);
1606 prepare_write_message_footer(con);
1607
1608 return 1; /* must return > 0 to indicate success */
1609 }
1610
1611 /*
1612 * write some zeros
1613 */
1614 static int write_partial_skip(struct ceph_connection *con)
1615 {
1616 int ret;
1617
1618 dout("%s %p %d left\n", __func__, con, con->out_skip);
1619 while (con->out_skip > 0) {
1620 size_t size = min(con->out_skip, (int) PAGE_SIZE);
1621
1622 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1623 if (ret <= 0)
1624 goto out;
1625 con->out_skip -= ret;
1626 }
1627 ret = 1;
1628 out:
1629 return ret;
1630 }
1631
1632 /*
1633 * Prepare to read connection handshake, or an ack.
1634 */
1635 static void prepare_read_banner(struct ceph_connection *con)
1636 {
1637 dout("prepare_read_banner %p\n", con);
1638 con->in_base_pos = 0;
1639 }
1640
1641 static void prepare_read_connect(struct ceph_connection *con)
1642 {
1643 dout("prepare_read_connect %p\n", con);
1644 con->in_base_pos = 0;
1645 }
1646
1647 static void prepare_read_ack(struct ceph_connection *con)
1648 {
1649 dout("prepare_read_ack %p\n", con);
1650 con->in_base_pos = 0;
1651 }
1652
1653 static void prepare_read_seq(struct ceph_connection *con)
1654 {
1655 dout("prepare_read_seq %p\n", con);
1656 con->in_base_pos = 0;
1657 con->in_tag = CEPH_MSGR_TAG_SEQ;
1658 }
1659
1660 static void prepare_read_tag(struct ceph_connection *con)
1661 {
1662 dout("prepare_read_tag %p\n", con);
1663 con->in_base_pos = 0;
1664 con->in_tag = CEPH_MSGR_TAG_READY;
1665 }
1666
1667 static void prepare_read_keepalive_ack(struct ceph_connection *con)
1668 {
1669 dout("prepare_read_keepalive_ack %p\n", con);
1670 con->in_base_pos = 0;
1671 }
1672
1673 /*
1674 * Prepare to read a message.
1675 */
1676 static int prepare_read_message(struct ceph_connection *con)
1677 {
1678 dout("prepare_read_message %p\n", con);
1679 BUG_ON(con->in_msg != NULL);
1680 con->in_base_pos = 0;
1681 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1682 return 0;
1683 }
1684
1685
1686 static int read_partial(struct ceph_connection *con,
1687 int end, int size, void *object)
1688 {
1689 while (con->in_base_pos < end) {
1690 int left = end - con->in_base_pos;
1691 int have = size - left;
1692 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1693 if (ret <= 0)
1694 return ret;
1695 con->in_base_pos += ret;
1696 }
1697 return 1;
1698 }
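
/*
 * Worked example (illustrative): reading a 9-byte banner followed by
 * an 8-byte field.  @end accumulates across calls while @size is per
 * object, so "have = size - left" resumes mid-object after a short
 * read returns 0 and the worker is re-run.
 */
static int __maybe_unused example_read_two(struct ceph_connection *con,
					   void *banner9, void *field8)
{
	int ret;

	ret = read_partial(con, 9, 9, banner9);		/* bytes 0..8 */
	if (ret <= 0)
		return ret;
	return read_partial(con, 9 + 8, 8, field8);	/* bytes 9..16 */
}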
1699
1700
1701 /*
1702 * Read all or part of the connect-side handshake on a new connection
1703 */
1704 static int read_partial_banner(struct ceph_connection *con)
1705 {
1706 int size;
1707 int end;
1708 int ret;
1709
1710 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1711
1712 /* peer's banner */
1713 size = strlen(CEPH_BANNER);
1714 end = size;
1715 ret = read_partial(con, end, size, con->in_banner);
1716 if (ret <= 0)
1717 goto out;
1718
1719 size = sizeof (con->actual_peer_addr);
1720 end += size;
1721 ret = read_partial(con, end, size, &con->actual_peer_addr);
1722 if (ret <= 0)
1723 goto out;
1724
1725 size = sizeof (con->peer_addr_for_me);
1726 end += size;
1727 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1728 if (ret <= 0)
1729 goto out;
1730
1731 out:
1732 return ret;
1733 }
1734
1735 static int read_partial_connect(struct ceph_connection *con)
1736 {
1737 int size;
1738 int end;
1739 int ret;
1740
1741 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1742
1743 size = sizeof (con->in_reply);
1744 end = size;
1745 ret = read_partial(con, end, size, &con->in_reply);
1746 if (ret <= 0)
1747 goto out;
1748
1749 size = le32_to_cpu(con->in_reply.authorizer_len);
1750 end += size;
1751 ret = read_partial(con, end, size, con->auth_reply_buf);
1752 if (ret <= 0)
1753 goto out;
1754
1755 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1756 con, (int)con->in_reply.tag,
1757 le32_to_cpu(con->in_reply.connect_seq),
1758 le32_to_cpu(con->in_reply.global_seq));
1759 out:
1760 return ret;
1761
1762 }
1763
1764 /*
1765 * Verify the hello banner looks okay.
1766 */
1767 static int verify_hello(struct ceph_connection *con)
1768 {
1769 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1770 pr_err("connect to %s got bad banner\n",
1771 ceph_pr_addr(&con->peer_addr.in_addr));
1772 con->error_msg = "protocol error, bad banner";
1773 return -1;
1774 }
1775 return 0;
1776 }
1777
1778 static bool addr_is_blank(struct sockaddr_storage *ss)
1779 {
1780 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
1781 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
1782
1783 switch (ss->ss_family) {
1784 case AF_INET:
1785 return addr->s_addr == htonl(INADDR_ANY);
1786 case AF_INET6:
1787 return ipv6_addr_any(addr6);
1788 default:
1789 return true;
1790 }
1791 }
1792
1793 static int addr_port(struct sockaddr_storage *ss)
1794 {
1795 switch (ss->ss_family) {
1796 case AF_INET:
1797 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1798 case AF_INET6:
1799 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1800 }
1801 return 0;
1802 }
1803
1804 static void addr_set_port(struct sockaddr_storage *ss, int p)
1805 {
1806 switch (ss->ss_family) {
1807 case AF_INET:
1808 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1809 break;
1810 case AF_INET6:
1811 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1812 break;
1813 }
1814 }
1815
1816 /*
1817 * Unlike the other *_pton functions, zero indicates success.
1818 */
1819 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1820 char delim, const char **ipend)
1821 {
1822 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1823 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1824
1825 memset(ss, 0, sizeof(*ss));
1826
1827 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1828 ss->ss_family = AF_INET;
1829 return 0;
1830 }
1831
1832 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1833 ss->ss_family = AF_INET6;
1834 return 0;
1835 }
1836
1837 return -EINVAL;
1838 }
1839
1840 /*
1841 * Extract hostname string and resolve using kernel DNS facility.
1842 */
1843 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1844 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1845 struct sockaddr_storage *ss, char delim, const char **ipend)
1846 {
1847 const char *end, *delim_p;
1848 char *colon_p, *ip_addr = NULL;
1849 int ip_len, ret;
1850
1851 /*
1852 * The end of the hostname occurs immediately preceding the delimiter or
1853 * the port marker (':'), where the delimiter takes precedence.
1854 */
1855 delim_p = memchr(name, delim, namelen);
1856 colon_p = memchr(name, ':', namelen);
1857
1858 if (delim_p && colon_p)
1859 end = delim_p < colon_p ? delim_p : colon_p;
1860 else if (!delim_p && colon_p)
1861 end = colon_p;
1862 else {
1863 end = delim_p;
1864 if (!end) /* case: hostname:/ */
1865 end = name + namelen;
1866 }
1867
1868 if (end <= name)
1869 return -EINVAL;
1870
1871 /* do dns_resolve upcall */
1872 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1873 if (ip_len > 0)
1874 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1875 else
1876 ret = -ESRCH;
1877
1878 kfree(ip_addr);
1879
1880 *ipend = end;
1881
1882 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1883 ret, ret ? "failed" : ceph_pr_addr(ss));
1884
1885 return ret;
1886 }
1887 #else
1888 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1889 struct sockaddr_storage *ss, char delim, const char **ipend)
1890 {
1891 return -EINVAL;
1892 }
1893 #endif
1894
1895 /*
1896 * Parse a server name (IP or hostname). If a valid IP address is not found
1897 * then try to extract a hostname to resolve using userspace DNS upcall.
1898 */
1899 static int ceph_parse_server_name(const char *name, size_t namelen,
1900 struct sockaddr_storage *ss, char delim, const char **ipend)
1901 {
1902 int ret;
1903
1904 ret = ceph_pton(name, namelen, ss, delim, ipend);
1905 if (ret)
1906 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1907
1908 return ret;
1909 }
1910
1911 /*
1912 * Parse an ip[:port] list into an addr array. Use the default
1913 * monitor port if a port isn't specified.
1914 */
1915 int ceph_parse_ips(const char *c, const char *end,
1916 struct ceph_entity_addr *addr,
1917 int max_count, int *count)
1918 {
1919 int i, ret = -EINVAL;
1920 const char *p = c;
1921
1922 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1923 for (i = 0; i < max_count; i++) {
1924 const char *ipend;
1925 struct sockaddr_storage *ss = &addr[i].in_addr;
1926 int port;
1927 char delim = ',';
1928
1929 if (*p == '[') {
1930 delim = ']';
1931 p++;
1932 }
1933
1934 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1935 if (ret)
1936 goto bad;
1937 ret = -EINVAL;
1938
1939 p = ipend;
1940
1941 if (delim == ']') {
1942 if (*p != ']') {
1943 dout("missing matching ']'\n");
1944 goto bad;
1945 }
1946 p++;
1947 }
1948
1949 /* port? */
1950 if (p < end && *p == ':') {
1951 port = 0;
1952 p++;
1953 while (p < end && *p >= '0' && *p <= '9') {
1954 port = (port * 10) + (*p - '0');
1955 p++;
1956 }
1957 if (port == 0)
1958 port = CEPH_MON_PORT;
1959 else if (port > 65535)
1960 goto bad;
1961 } else {
1962 port = CEPH_MON_PORT;
1963 }
1964
1965 addr_set_port(ss, port);
1966
1967 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1968
1969 if (p == end)
1970 break;
1971 if (*p != ',')
1972 goto bad;
1973 p++;
1974 }
1975
1976 if (p != end)
1977 goto bad;
1978
1979 if (count)
1980 *count = i + 1;
1981 return 0;
1982
1983 bad:
1984 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1985 return ret;
1986 }
1987 EXPORT_SYMBOL(ceph_parse_ips);
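
/*
 * Example (illustrative): parsing a two-entry monitor list.  The
 * bracketed form carries an IPv6 address; an entry with no port falls
 * back to CEPH_MON_PORT.
 */
static int __maybe_unused example_parse_mons(struct ceph_entity_addr *addrs)
{
	const char *s = "10.0.0.1:6789,[::1]";
	int count;

	return ceph_parse_ips(s, s + strlen(s), addrs, 2, &count);
}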
1988
1989 static int process_banner(struct ceph_connection *con)
1990 {
1991 dout("process_banner on %p\n", con);
1992
1993 if (verify_hello(con) < 0)
1994 return -1;
1995
1996 ceph_decode_addr(&con->actual_peer_addr);
1997 ceph_decode_addr(&con->peer_addr_for_me);
1998
1999 /*
2000 * Make sure the other end is who we wanted.  Note that the other
2001 * end may not yet know their IP address, so if it's 0.0.0.0, give
2002 * them the benefit of the doubt.
2003 */
2004 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
2005 sizeof(con->peer_addr)) != 0 &&
2006 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
2007 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
2008 pr_warn("wrong peer, want %s/%d, got %s/%d\n",
2009 ceph_pr_addr(&con->peer_addr.in_addr),
2010 (int)le32_to_cpu(con->peer_addr.nonce),
2011 ceph_pr_addr(&con->actual_peer_addr.in_addr),
2012 (int)le32_to_cpu(con->actual_peer_addr.nonce));
2013 con->error_msg = "wrong peer at address";
2014 return -1;
2015 }
2016
2017 /*
2018 * did we learn our address?
2019 */
2020 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
2021 int port = addr_port(&con->msgr->inst.addr.in_addr);
2022
2023 memcpy(&con->msgr->inst.addr.in_addr,
2024 &con->peer_addr_for_me.in_addr,
2025 sizeof(con->peer_addr_for_me.in_addr));
2026 addr_set_port(&con->msgr->inst.addr.in_addr, port);
2027 encode_my_addr(con->msgr);
2028 dout("process_banner learned my addr is %s\n",
2029 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
2030 }
2031
2032 return 0;
2033 }
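/*
 * Worked example (illustrative, not from the original file): if this
 * messenger bound to 0.0.0.0:6801 and the peer's banner reports that
 * it sees us as 192.168.1.5, the block above copies 192.168.1.5 into
 * inst.addr while keeping port 6801, so messages we send from now on
 * carry a usable return address.
 */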
2034
2035 static int process_connect(struct ceph_connection *con)
2036 {
2037 u64 sup_feat = from_msgr(con->msgr)->supported_features;
2038 u64 req_feat = from_msgr(con->msgr)->required_features;
2039 u64 server_feat = le64_to_cpu(con->in_reply.features);
2040 int ret;
2041
2042 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2043
2044 if (con->auth_reply_buf) {
2045 /*
2046 * Any connection that defines ->get_authorizer()
2047 * should also define ->verify_authorizer_reply().
2048 * See get_connect_authorizer().
2049 */
2050 ret = con->ops->verify_authorizer_reply(con);
2051 if (ret < 0) {
2052 con->error_msg = "bad authorize reply";
2053 return ret;
2054 }
2055 }
2056
2057 switch (con->in_reply.tag) {
2058 case CEPH_MSGR_TAG_FEATURES:
2059 pr_err("%s%lld %s feature set mismatch,"
2060 " my %llx < server's %llx, missing %llx\n",
2061 ENTITY_NAME(con->peer_name),
2062 ceph_pr_addr(&con->peer_addr.in_addr),
2063 sup_feat, server_feat, server_feat & ~sup_feat);
2064 con->error_msg = "missing required protocol features";
2065 reset_connection(con);
2066 return -1;
2067
2068 case CEPH_MSGR_TAG_BADPROTOVER:
2069 pr_err("%s%lld %s protocol version mismatch,"
2070 " my %d != server's %d\n",
2071 ENTITY_NAME(con->peer_name),
2072 ceph_pr_addr(&con->peer_addr.in_addr),
2073 le32_to_cpu(con->out_connect.protocol_version),
2074 le32_to_cpu(con->in_reply.protocol_version));
2075 con->error_msg = "protocol version mismatch";
2076 reset_connection(con);
2077 return -1;
2078
2079 case CEPH_MSGR_TAG_BADAUTHORIZER:
2080 con->auth_retry++;
2081 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2082 con->auth_retry);
2083 if (con->auth_retry == 2) {
2084 con->error_msg = "connect authorization failure";
2085 return -1;
2086 }
2087 con_out_kvec_reset(con);
2088 ret = prepare_write_connect(con);
2089 if (ret < 0)
2090 return ret;
2091 prepare_read_connect(con);
2092 break;
2093
2094 case CEPH_MSGR_TAG_RESETSESSION:
2095 /*
2096 * If we connected with a large connect_seq but the peer
2097 * has no record of a session with us (no connection, or
2098 * connect_seq == 0), they will send RESETSESSION to indicate
2099 * that they must have reset their session, and may have
2100 * dropped messages.
2101 */
2102 dout("process_connect got RESET peer seq %u\n",
2103 le32_to_cpu(con->in_reply.connect_seq));
2104 pr_err("%s%lld %s connection reset\n",
2105 ENTITY_NAME(con->peer_name),
2106 ceph_pr_addr(&con->peer_addr.in_addr));
2107 reset_connection(con);
2108 con_out_kvec_reset(con);
2109 ret = prepare_write_connect(con);
2110 if (ret < 0)
2111 return ret;
2112 prepare_read_connect(con);
2113
2114 /* Tell ceph about it. */
2115 mutex_unlock(&con->mutex);
2116 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2117 if (con->ops->peer_reset)
2118 con->ops->peer_reset(con);
2119 mutex_lock(&con->mutex);
2120 if (con->state != CON_STATE_NEGOTIATING)
2121 return -EAGAIN;
2122 break;
2123
2124 case CEPH_MSGR_TAG_RETRY_SESSION:
2125 /*
2126 * If we sent a smaller connect_seq than the peer has, try
2127 * again with a larger value.
2128 */
2129 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2130 le32_to_cpu(con->out_connect.connect_seq),
2131 le32_to_cpu(con->in_reply.connect_seq));
2132 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
2133 con_out_kvec_reset(con);
2134 ret = prepare_write_connect(con);
2135 if (ret < 0)
2136 return ret;
2137 prepare_read_connect(con);
2138 break;
2139
2140 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2141 /*
2142 * If we sent a smaller global_seq than the peer has, try
2143 * again with a larger value.
2144 */
2145 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2146 con->peer_global_seq,
2147 le32_to_cpu(con->in_reply.global_seq));
2148 get_global_seq(con->msgr,
2149 le32_to_cpu(con->in_reply.global_seq));
2150 con_out_kvec_reset(con);
2151 ret = prepare_write_connect(con);
2152 if (ret < 0)
2153 return ret;
2154 prepare_read_connect(con);
2155 break;
2156
2157 case CEPH_MSGR_TAG_SEQ:
2158 case CEPH_MSGR_TAG_READY:
2159 if (req_feat & ~server_feat) {
2160 pr_err("%s%lld %s protocol feature mismatch,"
2161 " my required %llx > server's %llx, need %llx\n",
2162 ENTITY_NAME(con->peer_name),
2163 ceph_pr_addr(&con->peer_addr.in_addr),
2164 req_feat, server_feat, req_feat & ~server_feat);
2165 con->error_msg = "missing required protocol features";
2166 reset_connection(con);
2167 return -1;
2168 }
2169
2170 WARN_ON(con->state != CON_STATE_NEGOTIATING);
2171 con->state = CON_STATE_OPEN;
2172 con->auth_retry = 0; /* we authenticated; clear flag */
2173 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2174 con->connect_seq++;
2175 con->peer_features = server_feat;
2176 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2177 con->peer_global_seq,
2178 le32_to_cpu(con->in_reply.connect_seq),
2179 con->connect_seq);
2180 WARN_ON(con->connect_seq !=
2181 le32_to_cpu(con->in_reply.connect_seq));
2182
2183 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
2184 con_flag_set(con, CON_FLAG_LOSSYTX);
2185
2186 con->delay = 0; /* reset backoff memory */
2187
2188 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2189 prepare_write_seq(con);
2190 prepare_read_seq(con);
2191 } else {
2192 prepare_read_tag(con);
2193 }
2194 break;
2195
2196 case CEPH_MSGR_TAG_WAIT:
2197 /*
2198 * If there is a connection race (we are opening
2199 * connections to each other), one of us may just have
2200 * to WAIT. This shouldn't happen if we are the
2201 * client.
2202 */
2203 con->error_msg = "protocol error, got WAIT as client";
2204 return -1;
2205
2206 default:
2207 con->error_msg = "protocol error, garbage tag during connect";
2208 return -1;
2209 }
2210 return 0;
2211 }
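/*
 * Quick reference for the tags handled above (a summary derived from
 * the switch, added for readability):
 *
 *	FEATURES, BADPROTOVER	fatal mismatch; reset the connection
 *	BADAUTHORIZER		retry once with a fresh authorizer
 *	RESETSESSION		peer lost our session; reset, reconnect
 *	RETRY_SESSION/_GLOBAL	bump connect_seq/global_seq and retry
 *	SEQ, READY		negotiation done; connection is OPEN
 *	WAIT			connection race; fatal for a client
 */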
2212
2213
2214 /*
2215 * read (part of) an ack
2216 */
2217 static int read_partial_ack(struct ceph_connection *con)
2218 {
2219 int size = sizeof(con->in_temp_ack);
2220 int end = size;
2221
2222 return read_partial(con, end, size, &con->in_temp_ack);
2223 }
2224
2225 /*
2226 * We can finally discard anything that's been acked.
2227 */
2228 static void process_ack(struct ceph_connection *con)
2229 {
2230 struct ceph_msg *m;
2231 u64 ack = le64_to_cpu(con->in_temp_ack);
2232 u64 seq;
2233 bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
2234 struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
2235
2236 /*
2237 * In the reconnect case, con_fault() has requeued messages
2238 * in out_sent. We should clean up old messages according to
2239 * the reconnect seq.
2240 */
2241 while (!list_empty(list)) {
2242 m = list_first_entry(list, struct ceph_msg, list_head);
2243 if (reconnect && m->needs_out_seq)
2244 break;
2245 seq = le64_to_cpu(m->hdr.seq);
2246 if (seq > ack)
2247 break;
2248 dout("got ack for seq %llu type %d at %p\n", seq,
2249 le16_to_cpu(m->hdr.type), m);
2250 m->ack_stamp = jiffies;
2251 ceph_msg_remove(m);
2252 }
2253
2254 prepare_read_tag(con);
2255 }
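/*
 * Worked example (illustrative): if out_sent holds messages with seqs
 * 5, 6 and 7 and the peer acks 6, the loop above frees 5 and 6 and
 * stops at 7 (seq > ack).  On a TAG_SEQ reconnect the same walk runs
 * over out_queue instead and stops early at the first requeued message
 * that still needs a new out_seq.
 */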
2256
2257
2258 static int read_partial_message_section(struct ceph_connection *con,
2259 struct kvec *section,
2260 unsigned int sec_len, u32 *crc)
2261 {
2262 int ret, left;
2263
2264 BUG_ON(!section);
2265
2266 while (section->iov_len < sec_len) {
2267 BUG_ON(section->iov_base == NULL);
2268 left = sec_len - section->iov_len;
2269 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2270 section->iov_len, left);
2271 if (ret <= 0)
2272 return ret;
2273 section->iov_len += ret;
2274 }
2275 if (section->iov_len == sec_len)
2276 *crc = crc32c(0, section->iov_base, section->iov_len);
2277
2278 return 1;
2279 }
2280
2281 static int read_partial_msg_data(struct ceph_connection *con)
2282 {
2283 struct ceph_msg *msg = con->in_msg;
2284 struct ceph_msg_data_cursor *cursor = &msg->cursor;
2285 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2286 struct page *page;
2287 size_t page_offset;
2288 size_t length;
2289 u32 crc = 0;
2290 int ret;
2291
2292 BUG_ON(!msg);
2293 if (list_empty(&msg->data))
2294 return -EIO;
2295
2296 if (do_datacrc)
2297 crc = con->in_data_crc;
2298 while (cursor->resid) {
2299 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
2300 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2301 if (ret <= 0) {
2302 if (do_datacrc)
2303 con->in_data_crc = crc;
2304
2305 return ret;
2306 }
2307
2308 if (do_datacrc)
2309 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2310 ceph_msg_data_advance(cursor, (size_t)ret);
2311 }
2312 if (do_datacrc)
2313 con->in_data_crc = crc;
2314
2315 return 1; /* must return > 0 to indicate success */
2316 }
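/*
 * Note (added for readability): the data CRC is accumulated across
 * partial reads.  Whenever the socket stalls mid-payload, the running
 * crc is parked in con->in_data_crc and picked up on the next call,
 * so a short TCP read never restarts the checksum from scratch.
 */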
2317
2318 /*
2319 * read (part of) a message.
2320 */
2321 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2322
2323 static int read_partial_message(struct ceph_connection *con)
2324 {
2325 struct ceph_msg *m = con->in_msg;
2326 int size;
2327 int end;
2328 int ret;
2329 unsigned int front_len, middle_len, data_len;
2330 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2331 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
2332 u64 seq;
2333 u32 crc;
2334
2335 dout("read_partial_message con %p msg %p\n", con, m);
2336
2337 /* header */
2338 size = sizeof(con->in_hdr);
2339 end = size;
2340 ret = read_partial(con, end, size, &con->in_hdr);
2341 if (ret <= 0)
2342 return ret;
2343
2344 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2345 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2346 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
2347 crc, con->in_hdr.crc);
2348 return -EBADMSG;
2349 }
2350
2351 front_len = le32_to_cpu(con->in_hdr.front_len);
2352 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2353 return -EIO;
2354 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2355 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2356 return -EIO;
2357 data_len = le32_to_cpu(con->in_hdr.data_len);
2358 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2359 return -EIO;
2360
2361 /* verify seq# */
2362 seq = le64_to_cpu(con->in_hdr.seq);
2363 if ((s64)seq - (s64)con->in_seq < 1) {
2364 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2365 ENTITY_NAME(con->peer_name),
2366 ceph_pr_addr(&con->peer_addr.in_addr),
2367 seq, con->in_seq + 1);
2368 con->in_base_pos = -front_len - middle_len - data_len -
2369 sizeof_footer(con);
2370 con->in_tag = CEPH_MSGR_TAG_READY;
2371 return 1;
2372 } else if ((s64)seq - (s64)con->in_seq > 1) {
2373 pr_err("read_partial_message bad seq %lld expected %lld\n",
2374 seq, con->in_seq + 1);
2375 con->error_msg = "bad message sequence # for incoming message";
2376 return -EBADE;
2377 }
2378
2379 /* allocate message? */
2380 if (!con->in_msg) {
2381 int skip = 0;
2382
2383 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2384 front_len, data_len);
2385 ret = ceph_con_in_msg_alloc(con, &skip);
2386 if (ret < 0)
2387 return ret;
2388
2389 BUG_ON(!con->in_msg ^ skip);
2390 if (skip) {
2391 /* skip this message */
2392 dout("alloc_msg said skip message\n");
2393 con->in_base_pos = -front_len - middle_len - data_len -
2394 sizeof_footer(con);
2395 con->in_tag = CEPH_MSGR_TAG_READY;
2396 con->in_seq++;
2397 return 1;
2398 }
2399
2400 BUG_ON(!con->in_msg);
2401 BUG_ON(con->in_msg->con != con);
2402 m = con->in_msg;
2403 m->front.iov_len = 0; /* haven't read it yet */
2404 if (m->middle)
2405 m->middle->vec.iov_len = 0;
2406
2407 /* prepare for data payload, if any */
2408
2409 if (data_len)
2410 prepare_message_data(con->in_msg, data_len);
2411 }
2412
2413 /* front */
2414 ret = read_partial_message_section(con, &m->front, front_len,
2415 &con->in_front_crc);
2416 if (ret <= 0)
2417 return ret;
2418
2419 /* middle */
2420 if (m->middle) {
2421 ret = read_partial_message_section(con, &m->middle->vec,
2422 middle_len,
2423 &con->in_middle_crc);
2424 if (ret <= 0)
2425 return ret;
2426 }
2427
2428 /* (page) data */
2429 if (data_len) {
2430 ret = read_partial_msg_data(con);
2431 if (ret <= 0)
2432 return ret;
2433 }
2434
2435 /* footer */
2436 size = sizeof_footer(con);
2437 end += size;
2438 ret = read_partial(con, end, size, &m->footer);
2439 if (ret <= 0)
2440 return ret;
2441
2442 if (!need_sign) {
2443 m->footer.flags = m->old_footer.flags;
2444 m->footer.sig = 0;
2445 }
2446
2447 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2448 m, front_len, m->footer.front_crc, middle_len,
2449 m->footer.middle_crc, data_len, m->footer.data_crc);
2450
2451 /* crc ok? */
2452 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2453 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2454 m, con->in_front_crc, m->footer.front_crc);
2455 return -EBADMSG;
2456 }
2457 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2458 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2459 m, con->in_middle_crc, m->footer.middle_crc);
2460 return -EBADMSG;
2461 }
2462 if (do_datacrc &&
2463 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2464 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2465 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2466 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2467 return -EBADMSG;
2468 }
2469
2470 if (need_sign && con->ops->check_message_signature &&
2471 con->ops->check_message_signature(m)) {
2472 pr_err("read_partial_message %p signature check failed\n", m);
2473 return -EBADMSG;
2474 }
2475
2476 return 1; /* done! */
2477 }
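/*
 * On-the-wire layout as consumed above (summary added for
 * readability): a fixed ceph_msg_header, then front, middle and data
 * sections of the lengths announced in that header, then a footer
 * carrying the per-section CRCs (plus a signature when MSG_AUTH is
 * negotiated) that each section is verified against.
 */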
2478
2479 /*
2480 * Process message. This happens in the worker thread. The callback should
2481 * be careful not to do anything that waits on other incoming messages or it
2482 * may deadlock.
2483 */
2484 static void process_message(struct ceph_connection *con)
2485 {
2486 struct ceph_msg *msg = con->in_msg;
2487
2488 BUG_ON(con->in_msg->con != con);
2489 con->in_msg = NULL;
2490
2491 /* if first message, set peer_name */
2492 if (con->peer_name.type == 0)
2493 con->peer_name = msg->hdr.src;
2494
2495 con->in_seq++;
2496 mutex_unlock(&con->mutex);
2497
2498 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2499 msg, le64_to_cpu(msg->hdr.seq),
2500 ENTITY_NAME(msg->hdr.src),
2501 le16_to_cpu(msg->hdr.type),
2502 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2503 le32_to_cpu(msg->hdr.front_len),
2504 le32_to_cpu(msg->hdr.data_len),
2505 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2506 con->ops->dispatch(con, msg);
2507
2508 mutex_lock(&con->mutex);
2509 }
2510
2511 static int read_keepalive_ack(struct ceph_connection *con)
2512 {
2513 struct ceph_timespec ceph_ts;
2514 size_t size = sizeof(ceph_ts);
2515 int ret = read_partial(con, size, size, &ceph_ts);
2516 if (ret <= 0)
2517 return ret;
2518 ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
2519 prepare_read_tag(con);
2520 return 1;
2521 }
2522
2523 /*
2524 * Write something to the socket. Called in a worker thread when the
2525 * socket appears to be writeable and we have something ready to send.
2526 */
2527 static int try_write(struct ceph_connection *con)
2528 {
2529 int ret = 1;
2530
2531 dout("try_write start %p state %lu\n", con, con->state);
2532
2533 more:
2534 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2535
2536 /* open the socket first? */
2537 if (con->state == CON_STATE_PREOPEN) {
2538 BUG_ON(con->sock);
2539 con->state = CON_STATE_CONNECTING;
2540
2541 con_out_kvec_reset(con);
2542 prepare_write_banner(con);
2543 prepare_read_banner(con);
2544
2545 BUG_ON(con->in_msg);
2546 con->in_tag = CEPH_MSGR_TAG_READY;
2547 dout("try_write initiating connect on %p new state %lu\n",
2548 con, con->state);
2549 ret = ceph_tcp_connect(con);
2550 if (ret < 0) {
2551 con->error_msg = "connect error";
2552 goto out;
2553 }
2554 }
2555
2556 more_kvec:
2557 /* kvec data queued? */
2558 if (con->out_kvec_left) {
2559 ret = write_partial_kvec(con);
2560 if (ret <= 0)
2561 goto out;
2562 }
2563 if (con->out_skip) {
2564 ret = write_partial_skip(con);
2565 if (ret <= 0)
2566 goto out;
2567 }
2568
2569 /* msg pages? */
2570 if (con->out_msg) {
2571 if (con->out_msg_done) {
2572 ceph_msg_put(con->out_msg);
2573 con->out_msg = NULL; /* we're done with this one */
2574 goto do_next;
2575 }
2576
2577 ret = write_partial_message_data(con);
2578 if (ret == 1)
2579 goto more_kvec; /* we need to send the footer, too! */
2580 if (ret == 0)
2581 goto out;
2582 if (ret < 0) {
2583 dout("try_write write_partial_message_data err %d\n",
2584 ret);
2585 goto out;
2586 }
2587 }
2588
2589 do_next:
2590 if (con->state == CON_STATE_OPEN) {
2591 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2592 prepare_write_keepalive(con);
2593 goto more;
2594 }
2595 /* is anything else pending? */
2596 if (!list_empty(&con->out_queue)) {
2597 prepare_write_message(con);
2598 goto more;
2599 }
2600 if (con->in_seq > con->in_seq_acked) {
2601 prepare_write_ack(con);
2602 goto more;
2603 }
2604 }
2605
2606 /* Nothing to do! */
2607 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2608 dout("try_write nothing else to write.\n");
2609 ret = 0;
2610 out:
2611 dout("try_write done on %p ret %d\n", con, ret);
2612 return ret;
2613 }
2614
2615
2616
2617 /*
2618 * Read what we can from the socket.
2619 */
2620 static int try_read(struct ceph_connection *con)
2621 {
2622 int ret = -1;
2623
2624 more:
2625 dout("try_read start on %p state %lu\n", con, con->state);
2626 if (con->state != CON_STATE_CONNECTING &&
2627 con->state != CON_STATE_NEGOTIATING &&
2628 con->state != CON_STATE_OPEN)
2629 return 0;
2630
2631 BUG_ON(!con->sock);
2632
2633 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2634 con->in_base_pos);
2635
2636 if (con->state == CON_STATE_CONNECTING) {
2637 dout("try_read connecting\n");
2638 ret = read_partial_banner(con);
2639 if (ret <= 0)
2640 goto out;
2641 ret = process_banner(con);
2642 if (ret < 0)
2643 goto out;
2644
2645 con->state = CON_STATE_NEGOTIATING;
2646
2647 /*
2648 * Received banner is good, exchange connection info.
2649 * Do not reset out_kvec, as sending our banner may have raced
2650 * with receiving the peer's banner after connect completed.
2651 */
2652 ret = prepare_write_connect(con);
2653 if (ret < 0)
2654 goto out;
2655 prepare_read_connect(con);
2656
2657 /* Send connection info before awaiting response */
2658 goto out;
2659 }
2660
2661 if (con->state == CON_STATE_NEGOTIATING) {
2662 dout("try_read negotiating\n");
2663 ret = read_partial_connect(con);
2664 if (ret <= 0)
2665 goto out;
2666 ret = process_connect(con);
2667 if (ret < 0)
2668 goto out;
2669 goto more;
2670 }
2671
2672 WARN_ON(con->state != CON_STATE_OPEN);
2673
2674 if (con->in_base_pos < 0) {
2675 /*
2676 * skipping + discarding content.
2677 *
2678 * FIXME: there must be a better way to do this!
2679 */
2680 static char buf[SKIP_BUF_SIZE];
2681 int skip = min((int)sizeof(buf), -con->in_base_pos);
2682
2683 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2684 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2685 if (ret <= 0)
2686 goto out;
2687 con->in_base_pos += ret;
2688 if (con->in_base_pos)
2689 goto more;
2690 }
2691 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2692 /*
2693 * what's next?
2694 */
2695 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2696 if (ret <= 0)
2697 goto out;
2698 dout("try_read got tag %d\n", (int)con->in_tag);
2699 switch (con->in_tag) {
2700 case CEPH_MSGR_TAG_MSG:
2701 prepare_read_message(con);
2702 break;
2703 case CEPH_MSGR_TAG_ACK:
2704 prepare_read_ack(con);
2705 break;
2706 case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
2707 prepare_read_keepalive_ack(con);
2708 break;
2709 case CEPH_MSGR_TAG_CLOSE:
2710 con_close_socket(con);
2711 con->state = CON_STATE_CLOSED;
2712 goto out;
2713 default:
2714 goto bad_tag;
2715 }
2716 }
2717 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2718 ret = read_partial_message(con);
2719 if (ret <= 0) {
2720 switch (ret) {
2721 case -EBADMSG:
2722 con->error_msg = "bad crc/signature";
2723 /* fall through */
2724 case -EBADE:
2725 ret = -EIO;
2726 break;
2727 case -EIO:
2728 con->error_msg = "io error";
2729 break;
2730 }
2731 goto out;
2732 }
2733 if (con->in_tag == CEPH_MSGR_TAG_READY)
2734 goto more;
2735 process_message(con);
2736 if (con->state == CON_STATE_OPEN)
2737 prepare_read_tag(con);
2738 goto more;
2739 }
2740 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2741 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2742 /*
2743 * the final handshake seq exchange is semantically
2744 * equivalent to an ACK
2745 */
2746 ret = read_partial_ack(con);
2747 if (ret <= 0)
2748 goto out;
2749 process_ack(con);
2750 goto more;
2751 }
2752 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2753 ret = read_keepalive_ack(con);
2754 if (ret <= 0)
2755 goto out;
2756 goto more;
2757 }
2758
2759 out:
2760 dout("try_read done on %p ret %d\n", con, ret);
2761 return ret;
2762
2763 bad_tag:
2764 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2765 con->error_msg = "protocol error, garbage tag";
2766 ret = -1;
2767 goto out;
2768 }
2769
2770
2771 /*
2772 * Atomically queue work on a connection after the specified delay.
2773 * Bump @con reference to avoid races with connection teardown.
2774 * Returns 0 if work was queued, or an error code otherwise.
2775 */
2776 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2777 {
2778 if (!con->ops->get(con)) {
2779 dout("%s %p ref count 0\n", __func__, con);
2780 return -ENOENT;
2781 }
2782
2783 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2784 dout("%s %p - already queued\n", __func__, con);
2785 con->ops->put(con);
2786 return -EBUSY;
2787 }
2788
2789 dout("%s %p %lu\n", __func__, con, delay);
2790 return 0;
2791 }
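/*
 * Reference-counting note (derived from the code above): a queued work
 * item owns one connection ref, taken via ops->get() here and dropped
 * either immediately (work already queued) or by ceph_con_workfn()
 * once the worker finishes, which keeps the connection alive until the
 * work runs.
 */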
2792
2793 static void queue_con(struct ceph_connection *con)
2794 {
2795 (void) queue_con_delay(con, 0);
2796 }
2797
2798 static void cancel_con(struct ceph_connection *con)
2799 {
2800 if (cancel_delayed_work(&con->work)) {
2801 dout("%s %p\n", __func__, con);
2802 con->ops->put(con);
2803 }
2804 }
2805
2806 static bool con_sock_closed(struct ceph_connection *con)
2807 {
2808 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
2809 return false;
2810
2811 #define CASE(x) \
2812 case CON_STATE_ ## x: \
2813 con->error_msg = "socket closed (con state " #x ")"; \
2814 break;
2815
2816 switch (con->state) {
2817 CASE(CLOSED);
2818 CASE(PREOPEN);
2819 CASE(CONNECTING);
2820 CASE(NEGOTIATING);
2821 CASE(OPEN);
2822 CASE(STANDBY);
2823 default:
2824 pr_warn("%s con %p unrecognized state %lu\n",
2825 __func__, con, con->state);
2826 con->error_msg = "unrecognized con state";
2827 BUG();
2828 break;
2829 }
2830 #undef CASE
2831
2832 return true;
2833 }
2834
2835 static bool con_backoff(struct ceph_connection *con)
2836 {
2837 int ret;
2838
2839 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2840 return false;
2841
2842 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2843 if (ret) {
2844 dout("%s: con %p FAILED to back off %lu\n", __func__,
2845 con, con->delay);
2846 BUG_ON(ret == -ENOENT);
2847 con_flag_set(con, CON_FLAG_BACKOFF);
2848 }
2849
2850 return true;
2851 }
2852
2853 /* Finish fault handling; con->mutex must *not* be held here */
2854
2855 static void con_fault_finish(struct ceph_connection *con)
2856 {
2857 dout("%s %p\n", __func__, con);
2858
2859 /*
2860 * in case we faulted due to authentication, invalidate our
2861 * current tickets so that we can get new ones.
2862 */
2863 if (con->auth_retry) {
2864 dout("auth_retry %d, invalidating\n", con->auth_retry);
2865 if (con->ops->invalidate_authorizer)
2866 con->ops->invalidate_authorizer(con);
2867 con->auth_retry = 0;
2868 }
2869
2870 if (con->ops->fault)
2871 con->ops->fault(con);
2872 }
2873
2874 /*
2875 * Do some work on a connection. Drop a connection ref when we're done.
2876 */
2877 static void ceph_con_workfn(struct work_struct *work)
2878 {
2879 struct ceph_connection *con = container_of(work, struct ceph_connection,
2880 work.work);
2881 bool fault;
2882
2883 mutex_lock(&con->mutex);
2884 while (true) {
2885 int ret;
2886
2887 if ((fault = con_sock_closed(con))) {
2888 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2889 break;
2890 }
2891 if (con_backoff(con)) {
2892 dout("%s: con %p BACKOFF\n", __func__, con);
2893 break;
2894 }
2895 if (con->state == CON_STATE_STANDBY) {
2896 dout("%s: con %p STANDBY\n", __func__, con);
2897 break;
2898 }
2899 if (con->state == CON_STATE_CLOSED) {
2900 dout("%s: con %p CLOSED\n", __func__, con);
2901 BUG_ON(con->sock);
2902 break;
2903 }
2904 if (con->state == CON_STATE_PREOPEN) {
2905 dout("%s: con %p PREOPEN\n", __func__, con);
2906 BUG_ON(con->sock);
2907 }
2908
2909 ret = try_read(con);
2910 if (ret < 0) {
2911 if (ret == -EAGAIN)
2912 continue;
2913 if (!con->error_msg)
2914 con->error_msg = "socket error on read";
2915 fault = true;
2916 break;
2917 }
2918
2919 ret = try_write(con);
2920 if (ret < 0) {
2921 if (ret == -EAGAIN)
2922 continue;
2923 if (!con->error_msg)
2924 con->error_msg = "socket error on write";
2925 fault = true;
2926 }
2927
2928 break; /* If we make it to here, we're done */
2929 }
2930 if (fault)
2931 con_fault(con);
2932 mutex_unlock(&con->mutex);
2933
2934 if (fault)
2935 con_fault_finish(con);
2936
2937 con->ops->put(con);
2938 }
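/*
 * Locking note (summary, added for readability): con_fault() runs with
 * con->mutex held, while con_fault_finish() deliberately runs after
 * the mutex is dropped, since its ->fault and ->invalidate_authorizer
 * callbacks may call back into the messenger.
 */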
2939
2940 /*
2941 * Generic error/fault handler. A retry mechanism is used with
2942 * exponential backoff.
2943 */
2944 static void con_fault(struct ceph_connection *con)
2945 {
2946 dout("fault %p state %lu to peer %s\n",
2947 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2948
2949 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2950 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2951 con->error_msg = NULL;
2952
2953 WARN_ON(con->state != CON_STATE_CONNECTING &&
2954 con->state != CON_STATE_NEGOTIATING &&
2955 con->state != CON_STATE_OPEN);
2956
2957 con_close_socket(con);
2958
2959 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
2960 dout("fault on LOSSYTX channel, marking CLOSED\n");
2961 con->state = CON_STATE_CLOSED;
2962 return;
2963 }
2964
2965 if (con->in_msg) {
2966 BUG_ON(con->in_msg->con != con);
2967 ceph_msg_put(con->in_msg);
2968 con->in_msg = NULL;
2969 }
2970
2971 /* Requeue anything that hasn't been acked */
2972 list_splice_init(&con->out_sent, &con->out_queue);
2973
2974 /* If there are no messages queued or keepalive pending, place
2975 * the connection in a STANDBY state */
2976 if (list_empty(&con->out_queue) &&
2977 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
2978 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2979 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2980 con->state = CON_STATE_STANDBY;
2981 } else {
2982 /* retry after a delay. */
2983 con->state = CON_STATE_PREOPEN;
2984 if (con->delay == 0)
2985 con->delay = BASE_DELAY_INTERVAL;
2986 else if (con->delay < MAX_DELAY_INTERVAL)
2987 con->delay *= 2;
2988 con_flag_set(con, CON_FLAG_BACKOFF);
2989 queue_con(con);
2990 }
2991 }
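/*
 * Backoff arithmetic (illustrative): con->delay starts at
 * BASE_DELAY_INTERVAL and doubles on each consecutive fault until it
 * hits MAX_DELAY_INTERVAL, so retries are spaced roughly 1, 2, 4,
 * 8, ... base intervals apart; a clean READY resets it to 0.
 */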
2992
2993
2994
2995 /*
2996 * initialize a new messenger instance
2997 */
2998 void ceph_messenger_init(struct ceph_messenger *msgr,
2999 struct ceph_entity_addr *myaddr)
3000 {
3001 spin_lock_init(&msgr->global_seq_lock);
3002
3003 if (myaddr)
3004 msgr->inst.addr = *myaddr;
3005
3006 /* select a random nonce */
3007 msgr->inst.addr.type = 0;
3008 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
3009 encode_my_addr(msgr);
3010
3011 atomic_set(&msgr->stopping, 0);
3012 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
3013
3014 dout("%s %p\n", __func__, msgr);
3015 }
3016 EXPORT_SYMBOL(ceph_messenger_init);
3017
3018 void ceph_messenger_fini(struct ceph_messenger *msgr)
3019 {
3020 put_net(read_pnet(&msgr->net));
3021 }
3022 EXPORT_SYMBOL(ceph_messenger_fini);
3023
3024 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
3025 {
3026 if (msg->con)
3027 msg->con->ops->put(msg->con);
3028
3029 msg->con = con ? con->ops->get(con) : NULL;
3030 BUG_ON(msg->con != con);
3031 }
3032
3033 static void clear_standby(struct ceph_connection *con)
3034 {
3035 /* come back from STANDBY? */
3036 if (con->state == CON_STATE_STANDBY) {
3037 dout("clear_standby %p and ++connect_seq\n", con);
3038 con->state = CON_STATE_PREOPEN;
3039 con->connect_seq++;
3040 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
3041 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
3042 }
3043 }
3044
3045 /*
3046 * Queue up an outgoing message on the given connection.
3047 */
3048 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3049 {
3050 /* set src+dst */
3051 msg->hdr.src = con->msgr->inst.name;
3052 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
3053 msg->needs_out_seq = true;
3054
3055 mutex_lock(&con->mutex);
3056
3057 if (con->state == CON_STATE_CLOSED) {
3058 dout("con_send %p closed, dropping %p\n", con, msg);
3059 ceph_msg_put(msg);
3060 mutex_unlock(&con->mutex);
3061 return;
3062 }
3063
3064 msg_con_set(msg, con);
3065
3066 BUG_ON(!list_empty(&msg->list_head));
3067 list_add_tail(&msg->list_head, &con->out_queue);
3068 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3069 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3070 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3071 le32_to_cpu(msg->hdr.front_len),
3072 le32_to_cpu(msg->hdr.middle_len),
3073 le32_to_cpu(msg->hdr.data_len));
3074
3075 clear_standby(con);
3076 mutex_unlock(&con->mutex);
3077
3078 /* if there wasn't anything waiting to send before, queue
3079 * new work */
3080 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3081 queue_con(con);
3082 }
3083 EXPORT_SYMBOL(ceph_con_send);
3084
3085 /*
3086 * Revoke a message that was previously queued for send
3087 */
3088 void ceph_msg_revoke(struct ceph_msg *msg)
3089 {
3090 struct ceph_connection *con = msg->con;
3091
3092 if (!con) {
3093 dout("%s msg %p null con\n", __func__, msg);
3094 return; /* Message not in our possession */
3095 }
3096
3097 mutex_lock(&con->mutex);
3098 if (!list_empty(&msg->list_head)) {
3099 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3100 list_del_init(&msg->list_head);
3101 msg->hdr.seq = 0;
3102
3103 ceph_msg_put(msg);
3104 }
3105 if (con->out_msg == msg) {
3106 BUG_ON(con->out_skip);
3107 /* footer */
3108 if (con->out_msg_done) {
3109 con->out_skip += con_out_kvec_skip(con);
3110 } else {
3111 BUG_ON(!msg->data_length);
3112 con->out_skip += sizeof_footer(con);
3113 }
3114 /* data, middle, front */
3115 if (msg->data_length)
3116 con->out_skip += msg->cursor.total_resid;
3117 if (msg->middle)
3118 con->out_skip += con_out_kvec_skip(con);
3119 con->out_skip += con_out_kvec_skip(con);
3120
3121 dout("%s %p msg %p - was sending, will write %d skip %d\n",
3122 __func__, con, msg, con->out_kvec_bytes, con->out_skip);
3123 msg->hdr.seq = 0;
3124 con->out_msg = NULL;
3125 ceph_msg_put(msg);
3126 }
3127
3128 mutex_unlock(&con->mutex);
3129 }
3130
3131 /*
3132 * Revoke a message that we may be reading data into
3133 */
3134 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3135 {
3136 struct ceph_connection *con = msg->con;
3137
3138 if (!con) {
3139 dout("%s msg %p null con\n", __func__, msg);
3140 return; /* Message not in our possession */
3141 }
3142
3143 mutex_lock(&con->mutex);
3144 if (con->in_msg == msg) {
3145 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3146 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3147 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3148
3149 /* skip rest of message */
3150 dout("%s %p msg %p revoked\n", __func__, con, msg);
3151 con->in_base_pos = con->in_base_pos -
3152 sizeof(struct ceph_msg_header) -
3153 front_len -
3154 middle_len -
3155 data_len -
3156 sizeof(struct ceph_msg_footer);
3157 ceph_msg_put(con->in_msg);
3158 con->in_msg = NULL;
3159 con->in_tag = CEPH_MSGR_TAG_READY;
3160 con->in_seq++;
3161 } else {
3162 dout("%s %p in_msg %p msg %p no-op\n",
3163 __func__, con, con->in_msg, msg);
3164 }
3165 mutex_unlock(&con->mutex);
3166 }
3167
3168 /*
3169 * Queue a keepalive byte to ensure the tcp connection is alive.
3170 */
3171 void ceph_con_keepalive(struct ceph_connection *con)
3172 {
3173 dout("con_keepalive %p\n", con);
3174 mutex_lock(&con->mutex);
3175 clear_standby(con);
3176 mutex_unlock(&con->mutex);
3177 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
3178 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3179 queue_con(con);
3180 }
3181 EXPORT_SYMBOL(ceph_con_keepalive);
3182
3183 bool ceph_con_keepalive_expired(struct ceph_connection *con,
3184 unsigned long interval)
3185 {
3186 if (interval > 0 &&
3187 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
3188 struct timespec now;
3189 struct timespec ts;
3190 ktime_get_real_ts(&now);
3191 jiffies_to_timespec(interval, &ts);
3192 ts = timespec_add(con->last_keepalive_ack, ts);
3193 return timespec_compare(&now, &ts) >= 0;
3194 }
3195 return false;
3196 }
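/*
 * Illustrative reading of the check above: with KEEPALIVE2 support the
 * keepalive is considered expired once
 *
 *	now >= last_keepalive_ack + interval
 *
 * where @interval is given in jiffies and converted to a timespec
 * before the comparison; without KEEPALIVE2 it never expires.
 */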
3197
3198 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
3199 {
3200 struct ceph_msg_data *data;
3201
3202 if (WARN_ON(!ceph_msg_data_type_valid(type)))
3203 return NULL;
3204
3205 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
3206 if (data)
3207 data->type = type;
3208 INIT_LIST_HEAD(&data->links);
3209
3210 return data;
3211 }
3212
3213 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3214 {
3215 if (!data)
3216 return;
3217
3218 WARN_ON(!list_empty(&data->links));
3219 if (data->type == CEPH_MSG_DATA_PAGELIST)
3220 ceph_pagelist_release(data->pagelist);
3221 kmem_cache_free(ceph_msg_data_cache, data);
3222 }
3223
3224 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
3225 size_t length, size_t alignment)
3226 {
3227 struct ceph_msg_data *data;
3228
3229 BUG_ON(!pages);
3230 BUG_ON(!length);
3231
3232 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
3233 BUG_ON(!data);
3234 data->pages = pages;
3235 data->length = length;
3236 data->alignment = alignment & ~PAGE_MASK;
3237
3238 list_add_tail(&data->links, &msg->data);
3239 msg->data_length += length;
3240 }
3241 EXPORT_SYMBOL(ceph_msg_data_add_pages);
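/*
 * Worked example (illustrative): with 4 KiB pages, passing
 * alignment = 0x1800 stores 0x1800 & ~PAGE_MASK == 0x800, i.e. only
 * the payload's offset within its first page is kept; the page-aligned
 * part of the value is irrelevant here.
 */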
3242
3243 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
3244 struct ceph_pagelist *pagelist)
3245 {
3246 struct ceph_msg_data *data;
3247
3248 BUG_ON(!pagelist);
3249 BUG_ON(!pagelist->length);
3250
3251 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
3252 BUG_ON(!data);
3253 data->pagelist = pagelist;
3254
3255 list_add_tail(&data->links, &msg->data);
3256 msg->data_length += pagelist->length;
3257 }
3258 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
3259
3260 #ifdef CONFIG_BLOCK
3261 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
3262 size_t length)
3263 {
3264 struct ceph_msg_data *data;
3265
3266 BUG_ON(!bio);
3267
3268 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
3269 BUG_ON(!data);
3270 data->bio = bio;
3271 data->bio_length = length;
3272
3273 list_add_tail(&data->links, &msg->data);
3274 msg->data_length += length;
3275 }
3276 EXPORT_SYMBOL(ceph_msg_data_add_bio);
3277 #endif /* CONFIG_BLOCK */
3278
3279 /*
3280 * construct a new message with given type, size
3281 * the new msg has a ref count of 1.
3282 */
3283 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3284 bool can_fail)
3285 {
3286 struct ceph_msg *m;
3287
3288 m = kmem_cache_zalloc(ceph_msg_cache, flags);
3289 if (m == NULL)
3290 goto out;
3291
3292 m->hdr.type = cpu_to_le16(type);
3293 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3294 m->hdr.front_len = cpu_to_le32(front_len);
3295
3296 INIT_LIST_HEAD(&m->list_head);
3297 kref_init(&m->kref);
3298 INIT_LIST_HEAD(&m->data);
3299
3300 /* front */
3301 if (front_len) {
3302 m->front.iov_base = ceph_kvmalloc(front_len, flags);
3303 if (m->front.iov_base == NULL) {
3304 dout("ceph_msg_new can't allocate %d bytes\n",
3305 front_len);
3306 goto out2;
3307 }
3308 } else {
3309 m->front.iov_base = NULL;
3310 }
3311 m->front_alloc_len = m->front.iov_len = front_len;
3312
3313 dout("ceph_msg_new %p front %d\n", m, front_len);
3314 return m;
3315
3316 out2:
3317 ceph_msg_put(m);
3318 out:
3319 if (!can_fail) {
3320 pr_err("msg_new can't create type %d front %d\n", type,
3321 front_len);
3322 WARN_ON(1);
3323 } else {
3324 dout("msg_new can't create type %d front %d\n", type,
3325 front_len);
3326 }
3327 return NULL;
3328 }
3329 EXPORT_SYMBOL(ceph_msg_new);
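/*
 * Illustrative usage (a sketch with an assumed message type, not from
 * the original file): allocating a message that is allowed to fail:
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
 *	if (!m)
 *		return -ENOMEM;	// can_fail: only a dout() was logged
 *	ceph_con_send(con, m);	// the queue takes over our reference
 */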
3330
3331 /*
3332 * Allocate "middle" portion of a message, if it is needed and wasn't
3333 * allocated by alloc_msg. This allows us to read a small fixed-size
3334 * per-type header in the front and then gracefully fail (i.e.,
3335 * propagate the error to the caller based on info in the front) when
3336 * the middle is too large.
3337 */
3338 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3339 {
3340 int type = le16_to_cpu(msg->hdr.type);
3341 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3342
3343 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3344 ceph_msg_type_name(type), middle_len);
3345 BUG_ON(!middle_len);
3346 BUG_ON(msg->middle);
3347
3348 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3349 if (!msg->middle)
3350 return -ENOMEM;
3351 return 0;
3352 }
3353
3354 /*
3355 * Allocate a message for receiving an incoming message on a
3356 * connection, and save the result in con->in_msg. Uses the
3357 * connection's private alloc_msg op if available.
3358 *
3359 * Returns 0 on success, or a negative error code.
3360 *
3361 * On success, if we set *skip = 1:
3362 * - the next message should be skipped and ignored.
3363 * - con->in_msg == NULL
3364 * or if we set *skip = 0:
3365 * - con->in_msg is non-null.
3366 * On error (ENOMEM, EAGAIN, ...),
3367 * - con->in_msg == NULL
3368 */
3369 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3370 {
3371 struct ceph_msg_header *hdr = &con->in_hdr;
3372 int middle_len = le32_to_cpu(hdr->middle_len);
3373 struct ceph_msg *msg;
3374 int ret = 0;
3375
3376 BUG_ON(con->in_msg != NULL);
3377 BUG_ON(!con->ops->alloc_msg);
3378
3379 mutex_unlock(&con->mutex);
3380 msg = con->ops->alloc_msg(con, hdr, skip);
3381 mutex_lock(&con->mutex);
3382 if (con->state != CON_STATE_OPEN) {
3383 if (msg)
3384 ceph_msg_put(msg);
3385 return -EAGAIN;
3386 }
3387 if (msg) {
3388 BUG_ON(*skip);
3389 msg_con_set(msg, con);
3390 con->in_msg = msg;
3391 } else {
3392 /*
3393 * Null message pointer means either we should skip
3394 * this message or we couldn't allocate memory. The
3395 * former is not an error.
3396 */
3397 if (*skip)
3398 return 0;
3399
3400 con->error_msg = "error allocating memory for incoming message";
3401 return -ENOMEM;
3402 }
3403 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3404
3405 if (middle_len && !con->in_msg->middle) {
3406 ret = ceph_alloc_middle(con, con->in_msg);
3407 if (ret < 0) {
3408 ceph_msg_put(con->in_msg);
3409 con->in_msg = NULL;
3410 }
3411 }
3412
3413 return ret;
3414 }
3415
3416
3417 /*
3418 * Free a generically kmalloc'd message.
3419 */
3420 static void ceph_msg_free(struct ceph_msg *m)
3421 {
3422 dout("%s %p\n", __func__, m);
3423 kvfree(m->front.iov_base);
3424 kmem_cache_free(ceph_msg_cache, m);
3425 }
3426
3427 static void ceph_msg_release(struct kref *kref)
3428 {
3429 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
3430 struct ceph_msg_data *data, *next;
3431
3432 dout("%s %p\n", __func__, m);
3433 WARN_ON(!list_empty(&m->list_head));
3434
3435 msg_con_set(m, NULL);
3436
3437 /* drop middle, data, if any */
3438 if (m->middle) {
3439 ceph_buffer_put(m->middle);
3440 m->middle = NULL;
3441 }
3442
3443 list_for_each_entry_safe(data, next, &m->data, links) {
3444 list_del_init(&data->links);
3445 ceph_msg_data_destroy(data);
3446 }
3447 m->data_length = 0;
3448
3449 if (m->pool)
3450 ceph_msgpool_put(m->pool, m);
3451 else
3452 ceph_msg_free(m);
3453 }
3454
3455 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3456 {
3457 dout("%s %p (was %d)\n", __func__, msg,
3458 kref_read(&msg->kref));
3459 kref_get(&msg->kref);
3460 return msg;
3461 }
3462 EXPORT_SYMBOL(ceph_msg_get);
3463
3464 void ceph_msg_put(struct ceph_msg *msg)
3465 {
3466 dout("%s %p (was %d)\n", __func__, msg,
3467 kref_read(&msg->kref));
3468 kref_put(&msg->kref, ceph_msg_release);
3469 }
3470 EXPORT_SYMBOL(ceph_msg_put);
3471
3472 void ceph_msg_dump(struct ceph_msg *msg)
3473 {
3474 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3475 msg->front_alloc_len, msg->data_length);
3476 print_hex_dump(KERN_DEBUG, "header: ",
3477 DUMP_PREFIX_OFFSET, 16, 1,
3478 &msg->hdr, sizeof(msg->hdr), true);
3479 print_hex_dump(KERN_DEBUG, " front: ",
3480 DUMP_PREFIX_OFFSET, 16, 1,
3481 msg->front.iov_base, msg->front.iov_len, true);
3482 if (msg->middle)
3483 print_hex_dump(KERN_DEBUG, "middle: ",
3484 DUMP_PREFIX_OFFSET, 16, 1,
3485 msg->middle->vec.iov_base,
3486 msg->middle->vec.iov_len, true);
3487 print_hex_dump(KERN_DEBUG, "footer: ",
3488 DUMP_PREFIX_OFFSET, 16, 1,
3489 &msg->footer, sizeof(msg->footer), true);
3490 }
3491 EXPORT_SYMBOL(ceph_msg_dump);