// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif  /* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

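/*
 * Added orientation sketch (not part of the original file): the messenger
 * is driven by the mon/osd/mds clients elsewhere in libceph, roughly as
 *
 *      ceph_messenger_init(&msgr, my_addr);
 *      ceph_con_init(&con, private, &my_ops, &msgr);
 *      ceph_con_open(&con, CEPH_ENTITY_TYPE_OSD, osd_num, &peer_addr);
 *      ceph_con_send(&con, msg);            (delivery is asynchronous)
 *      ...
 *      ceph_con_close(&con);
 *
 * The authoritative declarations live in include/linux/ceph/messenger.h;
 * my_addr, my_ops, osd_num and peer_addr above are placeholder names.
 */
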
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW              0       /* -> CLOSED */
#define CON_SOCK_STATE_CLOSED           1       /* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING       2       /* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED        3       /* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING          4       /* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED        1  /* -> PREOPEN */
#define CON_STATE_PREOPEN       2  /* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING    3  /* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING   4  /* -> OPEN, CLOSED */
#define CON_STATE_OPEN          5  /* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY       6  /* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX           0  /* we can close channel or drop
                                       * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1  /* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING     2  /* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED       3  /* socket state changed to closed */
#define CON_FLAG_BACKOFF           4  /* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
        switch (con_flag) {
        case CON_FLAG_LOSSYTX:
        case CON_FLAG_KEEPALIVE_PENDING:
        case CON_FLAG_WRITE_PENDING:
        case CON_FLAG_SOCK_CLOSED:
        case CON_FLAG_BACKOFF:
                return true;
        default:
                return false;
        }
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
        BUG_ON(!con_flag_valid(con_flag));

        clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
        BUG_ON(!con_flag_valid(con_flag));

        set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
        BUG_ON(!con_flag_valid(con_flag));

        return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
                                    unsigned long con_flag)
{
        BUG_ON(!con_flag_valid(con_flag));

        return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
                                  unsigned long con_flag)
{
        BUG_ON(!con_flag_valid(con_flag));

        return test_and_set_bit(con_flag, &con->flags);
}

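/*
 * Added note (not in the original file): the accessors above are thin,
 * BUG_ON-checked wrappers around the atomic bitops.  The usual pattern in
 * the rest of this file is to set a flag and then kick the worker, e.g.
 *
 *      con_flag_set(con, CON_FLAG_WRITE_PENDING);
 *      queue_con(con);
 *
 * with the worker consuming one-shot flags via con_flag_test_and_clear().
 */
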
/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;
static struct kmem_cache *ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE   1024

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG      5       /* log2(# address strings in array) */
#define ADDR_STR_COUNT          (1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK     (ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN        64      /* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;          /* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
        int i;
        char *s;
        struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

        i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
        s = addr_str[i];

        switch (ss->ss_family) {
        case AF_INET:
                snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
                         ntohs(in4->sin_port));
                break;

        case AF_INET6:
                snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
                         ntohs(in6->sin6_port));
                break;

        default:
                snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
                         ss->ss_family);
        }

        return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
        memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
        ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
        BUG_ON(ceph_msg_cache);
        ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
        if (!ceph_msg_cache)
                return -ENOMEM;

        BUG_ON(ceph_msg_data_cache);
        ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
        if (ceph_msg_data_cache)
                return 0;

        kmem_cache_destroy(ceph_msg_cache);
        ceph_msg_cache = NULL;

        return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
        BUG_ON(!ceph_msg_data_cache);
        kmem_cache_destroy(ceph_msg_data_cache);
        ceph_msg_data_cache = NULL;

        BUG_ON(!ceph_msg_cache);
        kmem_cache_destroy(ceph_msg_cache);
        ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
        if (ceph_msgr_wq) {
                destroy_workqueue(ceph_msgr_wq);
                ceph_msgr_wq = NULL;
        }

        BUG_ON(zero_page == NULL);
        put_page(zero_page);
        zero_page = NULL;

        ceph_msgr_slab_exit();
}

int ceph_msgr_init(void)
{
        if (ceph_msgr_slab_init())
                return -ENOMEM;

        BUG_ON(zero_page != NULL);
        zero_page = ZERO_PAGE(0);
        get_page(zero_page);

        /*
         * The number of active work items is limited by the number of
         * connections, so leave @max_active at default.
         */
        ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
        if (ceph_msgr_wq)
                return 0;

        pr_err("msgr_init failed to create workqueue\n");
        _ceph_msgr_exit();

        return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
        BUG_ON(ceph_msgr_wq == NULL);

        _ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
        flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
        if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
                printk("%s: unexpected old state %d\n", __func__, old_state);
        dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
             CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
        if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
                printk("%s: unexpected old state %d\n", __func__, old_state);
        dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
             CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
        dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
             CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
                    old_state != CON_SOCK_STATE_CONNECTED &&
                    old_state != CON_SOCK_STATE_CLOSING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
        dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
             CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
                    old_state != CON_SOCK_STATE_CLOSING &&
                    old_state != CON_SOCK_STATE_CONNECTING &&
                    old_state != CON_SOCK_STATE_CLOSED))
                printk("%s: unexpected old state %d\n", __func__, old_state);
        dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
             CON_SOCK_STATE_CLOSED);
}

31b8006e
SW
380/*
381 * socket callback functions
382 */
383
384/* data available on socket, or listen socket received a connect */
676d2369 385static void ceph_sock_data_ready(struct sock *sk)
31b8006e 386{
bd406145 387 struct ceph_connection *con = sk->sk_user_data;
a2a32584
GH
388 if (atomic_read(&con->msgr->stopping)) {
389 return;
390 }
bd406145 391
31b8006e 392 if (sk->sk_state != TCP_CLOSE_WAIT) {
327800bd 393 dout("%s on %p state = %lu, queueing work\n", __func__,
31b8006e
SW
394 con, con->state);
395 queue_con(con);
396 }
397}
398
399/* socket has buffer space for writing */
327800bd 400static void ceph_sock_write_space(struct sock *sk)
31b8006e 401{
d3002b97 402 struct ceph_connection *con = sk->sk_user_data;
31b8006e 403
182fac26
JS
404 /* only queue to workqueue if there is data we want to write,
405 * and there is sufficient space in the socket buffer to accept
327800bd 406 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
182fac26
JS
407 * doesn't get called again until try_write() fills the socket
408 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
409 * and net/core/stream.c:sk_stream_write_space().
410 */
c9ffc77a 411 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
64dc6130 412 if (sk_stream_is_writeable(sk)) {
327800bd 413 dout("%s %p queueing write work\n", __func__, con);
182fac26
JS
414 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
415 queue_con(con);
416 }
31b8006e 417 } else {
327800bd 418 dout("%s %p nothing to write\n", __func__, con);
31b8006e 419 }
31b8006e
SW
420}
421
422/* socket's state has changed */
327800bd 423static void ceph_sock_state_change(struct sock *sk)
31b8006e 424{
bd406145 425 struct ceph_connection *con = sk->sk_user_data;
31b8006e 426
327800bd 427 dout("%s %p state = %lu sk_state = %u\n", __func__,
31b8006e
SW
428 con, con->state, sk->sk_state);
429
31b8006e
SW
430 switch (sk->sk_state) {
431 case TCP_CLOSE:
327800bd 432 dout("%s TCP_CLOSE\n", __func__);
18370b36 433 /* fall through */
31b8006e 434 case TCP_CLOSE_WAIT:
327800bd 435 dout("%s TCP_CLOSE_WAIT\n", __func__);
ce2c8903 436 con_sock_state_closing(con);
c9ffc77a 437 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
d65c9e0b 438 queue_con(con);
31b8006e
SW
439 break;
440 case TCP_ESTABLISHED:
327800bd 441 dout("%s TCP_ESTABLISHED\n", __func__);
ce2c8903 442 con_sock_state_connected(con);
31b8006e
SW
443 queue_con(con);
444 break;
d3002b97
AE
445 default: /* Everything else is uninteresting */
446 break;
31b8006e
SW
447 }
448}
449
450/*
451 * set up socket callbacks
452 */
453static void set_sock_callbacks(struct socket *sock,
454 struct ceph_connection *con)
455{
456 struct sock *sk = sock->sk;
bd406145 457 sk->sk_user_data = con;
327800bd
AE
458 sk->sk_data_ready = ceph_sock_data_ready;
459 sk->sk_write_space = ceph_sock_write_space;
460 sk->sk_state_change = ceph_sock_state_change;
31b8006e
SW
461}
462
463
464/*
465 * socket helpers
466 */
467
468/*
469 * initiate connection to a remote socket.
470 */
41617d0c 471static int ceph_tcp_connect(struct ceph_connection *con)
31b8006e 472{
f91d3471 473 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
31b8006e 474 struct socket *sock;
633ee407 475 unsigned int noio_flag;
31b8006e
SW
476 int ret;
477
478 BUG_ON(con->sock);
633ee407
ID
479
480 /* sock_create_kern() allocates with GFP_KERNEL */
481 noio_flag = memalloc_noio_save();
757856d2 482 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
eeb1bd5c 483 SOCK_STREAM, IPPROTO_TCP, &sock);
633ee407 484 memalloc_noio_restore(noio_flag);
31b8006e 485 if (ret)
41617d0c 486 return ret;
6d7fdb0a 487 sock->sk->sk_allocation = GFP_NOFS;
31b8006e 488
a6a5349d
SW
489#ifdef CONFIG_LOCKDEP
490 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
491#endif
492
31b8006e
SW
493 set_sock_callbacks(sock, con);
494
3d14c5d2 495 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
31b8006e 496
89a86be0 497 con_sock_state_connecting(con);
f91d3471
SW
498 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
499 O_NONBLOCK);
31b8006e
SW
500 if (ret == -EINPROGRESS) {
501 dout("connect %s EINPROGRESS sk_state = %u\n",
3d14c5d2 502 ceph_pr_addr(&con->peer_addr.in_addr),
31b8006e 503 sock->sk->sk_state);
a5bc3129 504 } else if (ret < 0) {
31b8006e 505 pr_err("connect %s error %d\n",
3d14c5d2 506 ceph_pr_addr(&con->peer_addr.in_addr), ret);
31b8006e 507 sock_release(sock);
41617d0c 508 return ret;
a5bc3129 509 }
89baaa57 510
859bff51 511 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
ba988f87
CH
512 int optval = 1;
513
514 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
515 (char *)&optval, sizeof(optval));
516 if (ret)
517 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
518 ret);
519 }
520
a5bc3129 521 con->sock = sock;
41617d0c 522 return 0;
31b8006e
SW
523}
524
525static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
526{
527 struct kvec iov = {buf, len};
528 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
98bdb0aa 529 int r;
31b8006e 530
100803a8
AV
531 iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
532 r = sock_recvmsg(sock, &msg, msg.msg_flags);
98bdb0aa
SW
533 if (r == -EAGAIN)
534 r = 0;
535 return r;
31b8006e
SW
536}
537
afb3d90e
AE
538static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
539 int page_offset, size_t length)
540{
100803a8
AV
541 struct bio_vec bvec = {
542 .bv_page = page,
543 .bv_offset = page_offset,
544 .bv_len = length
545 };
546 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
547 int r;
afb3d90e
AE
548
549 BUG_ON(page_offset + length > PAGE_SIZE);
100803a8
AV
550 iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
551 r = sock_recvmsg(sock, &msg, msg.msg_flags);
552 if (r == -EAGAIN)
553 r = 0;
554 return r;
afb3d90e
AE
555}
556
31b8006e
SW
557/*
558 * write something. @more is true if caller will be sending more data
559 * shortly.
560 */
561static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
562 size_t kvlen, size_t len, int more)
563{
564 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
42961d23 565 int r;
31b8006e
SW
566
567 if (more)
568 msg.msg_flags |= MSG_MORE;
569 else
570 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
571
42961d23
SW
572 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
573 if (r == -EAGAIN)
574 r = 0;
575 return r;
31b8006e
SW
576}
577
178eda29 578static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
e1dcb128 579 int offset, size_t size, bool more)
31739139
AE
580{
581 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
582 int ret;
583
584 ret = kernel_sendpage(sock, page, offset, size, flags);
585 if (ret == -EAGAIN)
586 ret = 0;
587
588 return ret;
589}
590
178eda29
CC
591static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
592 int offset, size_t size, bool more)
593{
61ff6e9b
AV
594 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
595 struct bio_vec bvec;
178eda29 596 int ret;
178eda29
CC
597
598 /* sendpage cannot properly handle pages with page_count == 0,
 599 * we need to fall back to sendmsg if that's the case */
600 if (page_count(page) >= 1)
601 return __ceph_tcp_sendpage(sock, page, offset, size, more);
602
61ff6e9b
AV
603 bvec.bv_page = page;
604 bvec.bv_offset = offset;
605 bvec.bv_len = size;
606
607 if (more)
608 msg.msg_flags |= MSG_MORE;
609 else
610 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
611
612 iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
613 ret = sock_sendmsg(sock, &msg);
614 if (ret == -EAGAIN)
615 ret = 0;
178eda29
CC
616
617 return ret;
618}
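/*
 * Added note (not in the original file): kernel_sendpage() hands the page
 * to the network stack, which takes its own reference for zero-copy
 * transmission; that is only safe for normally refcounted pages.  Pages
 * with page_count() == 0 therefore go through the sock_sendmsg() copy
 * path above instead.
 */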
31b8006e
SW
619
620/*
621 * Shutdown/close the socket for the given connection.
622 */
623static int con_close_socket(struct ceph_connection *con)
624{
8007b8d6 625 int rc = 0;
31b8006e
SW
626
627 dout("con_close_socket on %p sock %p\n", con, con->sock);
8007b8d6
SW
628 if (con->sock) {
629 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
630 sock_release(con->sock);
631 con->sock = NULL;
632 }
456ea468
AE
633
634 /*
4a861692 635 * Forcibly clear the SOCK_CLOSED flag. It gets set
456ea468
AE
 636 * independently of the connection mutex, and we could have
637 * received a socket close event before we had the chance to
638 * shut the socket down.
639 */
c9ffc77a 640 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
8007b8d6 641
ce2c8903 642 con_sock_state_closed(con);
31b8006e
SW
643 return rc;
644}
645
646/*
647 * Reset a connection. Discard all incoming and outgoing messages
648 * and clear *_seq state.
649 */
650static void ceph_msg_remove(struct ceph_msg *msg)
651{
652 list_del_init(&msg->list_head);
38941f80 653
31b8006e
SW
654 ceph_msg_put(msg);
655}
656static void ceph_msg_remove_list(struct list_head *head)
657{
658 while (!list_empty(head)) {
659 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
660 list_head);
661 ceph_msg_remove(msg);
662 }
663}
664
665static void reset_connection(struct ceph_connection *con)
666{
667 /* reset connection, out_queue, msg_ and connect_seq */
668 /* discard existing out_queue and msg_seq */
0fa6ebc6 669 dout("reset_connection %p\n", con);
31b8006e
SW
670 ceph_msg_remove_list(&con->out_queue);
671 ceph_msg_remove_list(&con->out_sent);
672
cf3e5c40 673 if (con->in_msg) {
38941f80 674 BUG_ON(con->in_msg->con != con);
cf3e5c40
SW
675 ceph_msg_put(con->in_msg);
676 con->in_msg = NULL;
677 }
678
31b8006e
SW
679 con->connect_seq = 0;
680 con->out_seq = 0;
c86a2930 681 if (con->out_msg) {
583d0fef 682 BUG_ON(con->out_msg->con != con);
c86a2930
SW
683 ceph_msg_put(con->out_msg);
684 con->out_msg = NULL;
685 }
31b8006e 686 con->in_seq = 0;
0e0d5e0c 687 con->in_seq_acked = 0;
67645d76
ID
688
689 con->out_skip = 0;
31b8006e
SW
690}
691
692/*
693 * mark a peer down. drop any open connections.
694 */
695void ceph_con_close(struct ceph_connection *con)
696{
8c50c817 697 mutex_lock(&con->mutex);
3d14c5d2
YS
698 dout("con_close %p peer %s\n", con,
699 ceph_pr_addr(&con->peer_addr.in_addr));
8dacc7da 700 con->state = CON_STATE_CLOSED;
a5988c49 701
c9ffc77a
AE
702 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
703 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
704 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
705 con_flag_clear(con, CON_FLAG_BACKOFF);
a5988c49 706
31b8006e 707 reset_connection(con);
6f2bc3ff 708 con->peer_global_seq = 0;
37ab77ac 709 cancel_con(con);
ee76e073 710 con_close_socket(con);
ec302645 711 mutex_unlock(&con->mutex);
31b8006e 712}
3d14c5d2 713EXPORT_SYMBOL(ceph_con_close);
31b8006e 714
31b8006e
SW
715/*
716 * Reopen a closed connection, with a new peer address.
717 */
b7a9e5dd
SW
718void ceph_con_open(struct ceph_connection *con,
719 __u8 entity_type, __u64 entity_num,
720 struct ceph_entity_addr *addr)
31b8006e 721{
5469155f 722 mutex_lock(&con->mutex);
3d14c5d2 723 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
8dacc7da 724
122070a2 725 WARN_ON(con->state != CON_STATE_CLOSED);
8dacc7da 726 con->state = CON_STATE_PREOPEN;
a5988c49 727
b7a9e5dd
SW
728 con->peer_name.type = (__u8) entity_type;
729 con->peer_name.num = cpu_to_le64(entity_num);
730
31b8006e 731 memcpy(&con->peer_addr, addr, sizeof(*addr));
03c677e1 732 con->delay = 0; /* reset backoff memory */
5469155f 733 mutex_unlock(&con->mutex);
31b8006e
SW
734 queue_con(con);
735}
3d14c5d2 736EXPORT_SYMBOL(ceph_con_open);
31b8006e 737
87b315a5
SW
738/*
739 * return true if this connection ever successfully opened
740 */
741bool ceph_con_opened(struct ceph_connection *con)
742{
743 return con->connect_seq > 0;
744}
745
31b8006e
SW
746/*
747 * initialize a new connection.
748 */
1bfd89f4
AE
749void ceph_con_init(struct ceph_connection *con, void *private,
750 const struct ceph_connection_operations *ops,
b7a9e5dd 751 struct ceph_messenger *msgr)
31b8006e
SW
752{
753 dout("con_init %p\n", con);
754 memset(con, 0, sizeof(*con));
1bfd89f4
AE
755 con->private = private;
756 con->ops = ops;
31b8006e 757 con->msgr = msgr;
ce2c8903
AE
758
759 con_sock_state_init(con);
760
ec302645 761 mutex_init(&con->mutex);
31b8006e
SW
762 INIT_LIST_HEAD(&con->out_queue);
763 INIT_LIST_HEAD(&con->out_sent);
68931622 764 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
a5988c49 765
8dacc7da 766 con->state = CON_STATE_CLOSED;
31b8006e 767}
3d14c5d2 768EXPORT_SYMBOL(ceph_con_init);
31b8006e
SW
769
770
771/*
772 * We maintain a global counter to order connection attempts. Get
773 * a unique seq greater than @gt.
774 */
775static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
776{
777 u32 ret;
778
779 spin_lock(&msgr->global_seq_lock);
780 if (msgr->global_seq < gt)
781 msgr->global_seq = gt;
782 ret = ++msgr->global_seq;
783 spin_unlock(&msgr->global_seq_lock);
784 return ret;
785}
786
e2200423 787static void con_out_kvec_reset(struct ceph_connection *con)
859eb799 788{
67645d76
ID
789 BUG_ON(con->out_skip);
790
859eb799
AE
791 con->out_kvec_left = 0;
792 con->out_kvec_bytes = 0;
793 con->out_kvec_cur = &con->out_kvec[0];
794}
795
e2200423 796static void con_out_kvec_add(struct ceph_connection *con,
859eb799
AE
797 size_t size, void *data)
798{
67645d76 799 int index = con->out_kvec_left;
859eb799 800
67645d76 801 BUG_ON(con->out_skip);
859eb799
AE
802 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
803
804 con->out_kvec[index].iov_len = size;
805 con->out_kvec[index].iov_base = data;
806 con->out_kvec_left++;
807 con->out_kvec_bytes += size;
808}
31b8006e 809
67645d76
ID
810/*
811 * Chop off a kvec from the end. Return residual number of bytes for
812 * that kvec, i.e. how many bytes would have been written if the kvec
813 * hadn't been nuked.
814 */
815static int con_out_kvec_skip(struct ceph_connection *con)
816{
817 int off = con->out_kvec_cur - con->out_kvec;
818 int skip = 0;
819
820 if (con->out_kvec_bytes > 0) {
821 skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
822 BUG_ON(con->out_kvec_bytes < skip);
823 BUG_ON(!con->out_kvec_left);
824 con->out_kvec_bytes -= skip;
825 con->out_kvec_left--;
826 }
827
828 return skip;
829}
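/*
 * Added note (not in the original file): con->out_skip is the counterpart
 * to the kvec helpers above.  When an outgoing message is revoked after
 * part of it has already been committed to the wire, the bytes that can no
 * longer come from the message are replaced with zeros instead -- see
 * write_partial_skip(), which sends zero_page until out_skip is drained.
 */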
830
df6ad1f9 831#ifdef CONFIG_BLOCK
6aaa4511
AE
832
833/*
834 * For a bio data item, a piece is whatever remains of the next
835 * entry in the current bio iovec, or the first entry in the next
836 * bio in the list.
837 */
8ae4f4f5 838static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
25aff7c5 839 size_t length)
6aaa4511 840{
8ae4f4f5 841 struct ceph_msg_data *data = cursor->data;
6aaa4511
AE
842 struct bio *bio;
843
844 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
845
846 bio = data->bio;
847 BUG_ON(!bio);
6aaa4511 848
ca8b3a69 849 cursor->resid = min(length, data->bio_length);
6aaa4511 850 cursor->bio = bio;
f38a5181
KO
851 cursor->bvec_iter = bio->bi_iter;
852 cursor->last_piece =
853 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
6aaa4511
AE
854}
855
8ae4f4f5 856static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
6aaa4511
AE
857 size_t *page_offset,
858 size_t *length)
859{
8ae4f4f5 860 struct ceph_msg_data *data = cursor->data;
6aaa4511 861 struct bio *bio;
f38a5181 862 struct bio_vec bio_vec;
6aaa4511
AE
863
864 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
865
866 bio = cursor->bio;
867 BUG_ON(!bio);
868
f38a5181 869 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
6aaa4511 870
f38a5181 871 *page_offset = (size_t) bio_vec.bv_offset;
6aaa4511 872 BUG_ON(*page_offset >= PAGE_SIZE);
25aff7c5
AE
873 if (cursor->last_piece) /* pagelist offset is always 0 */
874 *length = cursor->resid;
875 else
f38a5181 876 *length = (size_t) bio_vec.bv_len;
25aff7c5 877 BUG_ON(*length > cursor->resid);
5df521b1 878 BUG_ON(*page_offset + *length > PAGE_SIZE);
6aaa4511 879
f38a5181 880 return bio_vec.bv_page;
6aaa4511
AE
881}
882
8ae4f4f5
AE
883static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
884 size_t bytes)
6aaa4511 885{
6aaa4511 886 struct bio *bio;
f38a5181 887 struct bio_vec bio_vec;
6aaa4511 888
8ae4f4f5 889 BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
6aaa4511
AE
890
891 bio = cursor->bio;
892 BUG_ON(!bio);
893
f38a5181 894 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
6aaa4511
AE
895
896 /* Advance the cursor offset */
897
25aff7c5
AE
898 BUG_ON(cursor->resid < bytes);
899 cursor->resid -= bytes;
f38a5181
KO
900
901 bio_advance_iter(bio, &cursor->bvec_iter, bytes);
902
903 if (bytes < bio_vec.bv_len)
6aaa4511
AE
904 return false; /* more bytes to process in this segment */
905
906 /* Move on to the next segment, and possibly the next bio */
907
f38a5181 908 if (!cursor->bvec_iter.bi_size) {
6aaa4511 909 bio = bio->bi_next;
0ec1d15e
ID
910 cursor->bio = bio;
911 if (bio)
912 cursor->bvec_iter = bio->bi_iter;
913 else
914 memset(&cursor->bvec_iter, 0,
915 sizeof(cursor->bvec_iter));
6aaa4511 916 }
6aaa4511 917
25aff7c5
AE
918 if (!cursor->last_piece) {
919 BUG_ON(!cursor->resid);
920 BUG_ON(!bio);
921 /* A short read is OK, so use <= rather than == */
f38a5181 922 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
6aaa4511 923 cursor->last_piece = true;
25aff7c5 924 }
6aaa4511
AE
925
926 return true;
927}
ea96571f 928#endif /* CONFIG_BLOCK */
df6ad1f9 929
e766d7b5
AE
930/*
931 * For a page array, a piece comes from the first page in the array
932 * that has not already been fully consumed.
933 */
8ae4f4f5 934static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
25aff7c5 935 size_t length)
e766d7b5 936{
8ae4f4f5 937 struct ceph_msg_data *data = cursor->data;
e766d7b5
AE
938 int page_count;
939
940 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
941
942 BUG_ON(!data->pages);
943 BUG_ON(!data->length);
944
ca8b3a69 945 cursor->resid = min(length, data->length);
e766d7b5 946 page_count = calc_pages_for(data->alignment, (u64)data->length);
e766d7b5
AE
947 cursor->page_offset = data->alignment & ~PAGE_MASK;
948 cursor->page_index = 0;
56fc5659
AE
949 BUG_ON(page_count > (int)USHRT_MAX);
950 cursor->page_count = (unsigned short)page_count;
951 BUG_ON(length > SIZE_MAX - cursor->page_offset);
5f740d7e 952 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
e766d7b5
AE
953}
954
8ae4f4f5
AE
955static struct page *
956ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
957 size_t *page_offset, size_t *length)
e766d7b5 958{
8ae4f4f5 959 struct ceph_msg_data *data = cursor->data;
e766d7b5
AE
960
961 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
962
963 BUG_ON(cursor->page_index >= cursor->page_count);
964 BUG_ON(cursor->page_offset >= PAGE_SIZE);
e766d7b5
AE
965
966 *page_offset = cursor->page_offset;
25aff7c5 967 if (cursor->last_piece)
e766d7b5 968 *length = cursor->resid;
25aff7c5 969 else
e766d7b5 970 *length = PAGE_SIZE - *page_offset;
e766d7b5
AE
971
972 return data->pages[cursor->page_index];
973}
974
8ae4f4f5 975static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
e766d7b5
AE
976 size_t bytes)
977{
8ae4f4f5 978 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
e766d7b5
AE
979
980 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
e766d7b5
AE
981
982 /* Advance the cursor page offset */
983
984 cursor->resid -= bytes;
5df521b1
AE
985 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
986 if (!bytes || cursor->page_offset)
e766d7b5
AE
987 return false; /* more bytes to process in the current page */
988
d90deda6
YZ
989 if (!cursor->resid)
990 return false; /* no more data */
991
5df521b1 992 /* Move on to the next page; offset is already at 0 */
e766d7b5
AE
993
994 BUG_ON(cursor->page_index >= cursor->page_count);
e766d7b5 995 cursor->page_index++;
25aff7c5 996 cursor->last_piece = cursor->resid <= PAGE_SIZE;
e766d7b5
AE
997
998 return true;
999}
1000
fe38a2b6 1001/*
dd236fcb
AE
1002 * For a pagelist, a piece is whatever remains to be consumed in the
1003 * first page in the list, or the front of the next page.
fe38a2b6 1004 */
8ae4f4f5
AE
1005static void
1006ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
25aff7c5 1007 size_t length)
fe38a2b6 1008{
8ae4f4f5 1009 struct ceph_msg_data *data = cursor->data;
fe38a2b6
AE
1010 struct ceph_pagelist *pagelist;
1011 struct page *page;
1012
dd236fcb 1013 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
fe38a2b6
AE
1014
1015 pagelist = data->pagelist;
1016 BUG_ON(!pagelist);
25aff7c5
AE
1017
1018 if (!length)
fe38a2b6
AE
1019 return; /* pagelist can be assigned but empty */
1020
1021 BUG_ON(list_empty(&pagelist->head));
1022 page = list_first_entry(&pagelist->head, struct page, lru);
1023
ca8b3a69 1024 cursor->resid = min(length, pagelist->length);
fe38a2b6
AE
1025 cursor->page = page;
1026 cursor->offset = 0;
a51b272e 1027 cursor->last_piece = cursor->resid <= PAGE_SIZE;
fe38a2b6
AE
1028}
1029
8ae4f4f5
AE
1030static struct page *
1031ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
1032 size_t *page_offset, size_t *length)
fe38a2b6 1033{
8ae4f4f5 1034 struct ceph_msg_data *data = cursor->data;
fe38a2b6 1035 struct ceph_pagelist *pagelist;
fe38a2b6
AE
1036
1037 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1038
1039 pagelist = data->pagelist;
1040 BUG_ON(!pagelist);
1041
1042 BUG_ON(!cursor->page);
25aff7c5 1043 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
fe38a2b6 1044
5df521b1 1045 /* offset of first page in pagelist is always 0 */
fe38a2b6 1046 *page_offset = cursor->offset & ~PAGE_MASK;
5df521b1 1047 if (cursor->last_piece)
25aff7c5
AE
1048 *length = cursor->resid;
1049 else
1050 *length = PAGE_SIZE - *page_offset;
fe38a2b6 1051
8ae4f4f5 1052 return cursor->page;
fe38a2b6
AE
1053}
1054
8ae4f4f5 1055static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
dd236fcb 1056 size_t bytes)
fe38a2b6 1057{
8ae4f4f5 1058 struct ceph_msg_data *data = cursor->data;
fe38a2b6
AE
1059 struct ceph_pagelist *pagelist;
1060
1061 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1062
1063 pagelist = data->pagelist;
1064 BUG_ON(!pagelist);
25aff7c5
AE
1065
1066 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
fe38a2b6
AE
1067 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
1068
1069 /* Advance the cursor offset */
1070
25aff7c5 1071 cursor->resid -= bytes;
fe38a2b6 1072 cursor->offset += bytes;
5df521b1 1073 /* offset of first page in pagelist is always 0 */
fe38a2b6
AE
1074 if (!bytes || cursor->offset & ~PAGE_MASK)
1075 return false; /* more bytes to process in the current page */
1076
d90deda6
YZ
1077 if (!cursor->resid)
1078 return false; /* no more data */
1079
fe38a2b6
AE
1080 /* Move on to the next page */
1081
1082 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
17ddc49b 1083 cursor->page = list_next_entry(cursor->page, lru);
25aff7c5 1084 cursor->last_piece = cursor->resid <= PAGE_SIZE;
fe38a2b6
AE
1085
1086 return true;
1087}
1088
dd236fcb
AE
1089/*
1090 * Message data is handled (sent or received) in pieces, where each
1091 * piece resides on a single page. The network layer might not
1092 * consume an entire piece at once. A data item's cursor keeps
1093 * track of which piece is next to process and how much remains to
1094 * be processed in that piece. It also tracks whether the current
1095 * piece is the last one in the data item.
1096 */
ca8b3a69 1097static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
dd236fcb 1098{
ca8b3a69 1099 size_t length = cursor->total_resid;
8ae4f4f5 1100
8ae4f4f5 1101 switch (cursor->data->type) {
dd236fcb 1102 case CEPH_MSG_DATA_PAGELIST:
8ae4f4f5 1103 ceph_msg_data_pagelist_cursor_init(cursor, length);
dd236fcb 1104 break;
e766d7b5 1105 case CEPH_MSG_DATA_PAGES:
8ae4f4f5 1106 ceph_msg_data_pages_cursor_init(cursor, length);
e766d7b5 1107 break;
dd236fcb
AE
1108#ifdef CONFIG_BLOCK
1109 case CEPH_MSG_DATA_BIO:
8ae4f4f5 1110 ceph_msg_data_bio_cursor_init(cursor, length);
6aaa4511 1111 break;
dd236fcb 1112#endif /* CONFIG_BLOCK */
6aaa4511 1113 case CEPH_MSG_DATA_NONE:
dd236fcb
AE
1114 default:
1115 /* BUG(); */
1116 break;
1117 }
8ae4f4f5 1118 cursor->need_crc = true;
dd236fcb
AE
1119}
1120
ca8b3a69
AE
1121static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
1122{
1123 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1124 struct ceph_msg_data *data;
1125
1126 BUG_ON(!length);
1127 BUG_ON(length > msg->data_length);
1128 BUG_ON(list_empty(&msg->data));
1129
ca8b3a69
AE
1130 cursor->data_head = &msg->data;
1131 cursor->total_resid = length;
1132 data = list_first_entry(&msg->data, struct ceph_msg_data, links);
1133 cursor->data = data;
1134
1135 __ceph_msg_data_cursor_init(cursor);
1136}
1137
dd236fcb
AE
1138/*
1139 * Return the page containing the next piece to process for a given
1140 * data item, and supply the page offset and length of that piece.
1141 * Indicate whether this is the last piece in this data item.
1142 */
8ae4f4f5
AE
1143static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1144 size_t *page_offset, size_t *length,
dd236fcb
AE
1145 bool *last_piece)
1146{
1147 struct page *page;
1148
8ae4f4f5 1149 switch (cursor->data->type) {
dd236fcb 1150 case CEPH_MSG_DATA_PAGELIST:
8ae4f4f5 1151 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
dd236fcb 1152 break;
e766d7b5 1153 case CEPH_MSG_DATA_PAGES:
8ae4f4f5 1154 page = ceph_msg_data_pages_next(cursor, page_offset, length);
e766d7b5 1155 break;
dd236fcb
AE
1156#ifdef CONFIG_BLOCK
1157 case CEPH_MSG_DATA_BIO:
8ae4f4f5 1158 page = ceph_msg_data_bio_next(cursor, page_offset, length);
6aaa4511 1159 break;
dd236fcb 1160#endif /* CONFIG_BLOCK */
6aaa4511 1161 case CEPH_MSG_DATA_NONE:
dd236fcb
AE
1162 default:
1163 page = NULL;
1164 break;
1165 }
1166 BUG_ON(!page);
1167 BUG_ON(*page_offset + *length > PAGE_SIZE);
1168 BUG_ON(!*length);
1169 if (last_piece)
8ae4f4f5 1170 *last_piece = cursor->last_piece;
dd236fcb
AE
1171
1172 return page;
1173}
1174
1175/*
 1176 * Advance the cursor by the given number of bytes, moving on to the next
 1177 * piece of the data item once the current piece has been fully consumed.
1178 */
1759f7b0
ID
1179static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1180 size_t bytes)
dd236fcb
AE
1181{
1182 bool new_piece;
1183
25aff7c5 1184 BUG_ON(bytes > cursor->resid);
8ae4f4f5 1185 switch (cursor->data->type) {
dd236fcb 1186 case CEPH_MSG_DATA_PAGELIST:
8ae4f4f5 1187 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
dd236fcb 1188 break;
e766d7b5 1189 case CEPH_MSG_DATA_PAGES:
8ae4f4f5 1190 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
e766d7b5 1191 break;
dd236fcb
AE
1192#ifdef CONFIG_BLOCK
1193 case CEPH_MSG_DATA_BIO:
8ae4f4f5 1194 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
6aaa4511 1195 break;
dd236fcb 1196#endif /* CONFIG_BLOCK */
6aaa4511 1197 case CEPH_MSG_DATA_NONE:
dd236fcb
AE
1198 default:
1199 BUG();
1200 break;
1201 }
ca8b3a69 1202 cursor->total_resid -= bytes;
dd236fcb 1203
ca8b3a69
AE
1204 if (!cursor->resid && cursor->total_resid) {
1205 WARN_ON(!cursor->last_piece);
1206 BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
17ddc49b 1207 cursor->data = list_next_entry(cursor->data, links);
ca8b3a69 1208 __ceph_msg_data_cursor_init(cursor);
a51b272e 1209 new_piece = true;
ca8b3a69 1210 }
a51b272e 1211 cursor->need_crc = new_piece;
dd236fcb
AE
1212}
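/*
 * Added sketch (not part of the original file): a data cursor is consumed
 * with the three calls above.  write_partial_message_data() below, and the
 * corresponding read path, follow roughly this shape:
 *
 *      ceph_msg_data_cursor_init(msg, data_len);
 *      while (cursor->resid) {
 *              page = ceph_msg_data_next(cursor, &off, &len, &last_piece);
 *              ...transfer up to len bytes of that page...
 *              ceph_msg_data_advance(cursor, bytes_done);
 *      }
 *
 * where off, len, last_piece and bytes_done are illustrative locals.
 */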
1213
dbc0d3ca
ID
1214static size_t sizeof_footer(struct ceph_connection *con)
1215{
1216 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
1217 sizeof(struct ceph_msg_footer) :
1218 sizeof(struct ceph_msg_footer_old);
1219}
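/*
 * Added note (not in the original file): struct ceph_msg_footer carries a
 * 64-bit signature field (filled in via ops->sign_message() when
 * CEPH_FEATURE_MSG_AUTH has been negotiated), while the older
 * struct ceph_msg_footer_old does not, hence the peer-feature-dependent
 * size here.
 */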
1220
98fa5dd8 1221static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
739c905b 1222{
739c905b 1223 BUG_ON(!msg);
25aff7c5 1224 BUG_ON(!data_len);
739c905b 1225
4c59b4a2 1226 /* Initialize data cursor */
fe38a2b6 1227
8ae4f4f5 1228 ceph_msg_data_cursor_init(msg, (size_t)data_len);
739c905b
AE
1229}
1230
31b8006e
SW
1231/*
1232 * Prepare footer for currently outgoing message, and finish things
 1233 * off.  Assumes out_kvec* are already valid; we just add on to the end.
1234 */
859eb799 1235static void prepare_write_message_footer(struct ceph_connection *con)
31b8006e
SW
1236{
1237 struct ceph_msg *m = con->out_msg;
1238
fd154f3c
AE
1239 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1240
31b8006e 1241 dout("prepare_write_message_footer %p\n", con);
89f08173 1242 con_out_kvec_add(con, sizeof_footer(con), &m->footer);
33d07337
YZ
1243 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
1244 if (con->ops->sign_message)
79dbd1ba 1245 con->ops->sign_message(m);
33d07337
YZ
1246 else
1247 m->footer.sig = 0;
33d07337
YZ
1248 } else {
1249 m->old_footer.flags = m->footer.flags;
33d07337 1250 }
31b8006e 1251 con->out_more = m->more_to_follow;
c86a2930 1252 con->out_msg_done = true;
31b8006e
SW
1253}
1254
1255/*
1256 * Prepare headers for the next outgoing message.
1257 */
1258static void prepare_write_message(struct ceph_connection *con)
1259{
1260 struct ceph_msg *m;
a9a0c51a 1261 u32 crc;
31b8006e 1262
e2200423 1263 con_out_kvec_reset(con);
c86a2930 1264 con->out_msg_done = false;
31b8006e
SW
1265
1266 /* Sneak an ack in there first? If we can get it into the same
1267 * TCP packet that's a good thing. */
1268 if (con->in_seq > con->in_seq_acked) {
1269 con->in_seq_acked = con->in_seq;
e2200423 1270 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
31b8006e 1271 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
e2200423 1272 con_out_kvec_add(con, sizeof (con->out_temp_ack),
859eb799 1273 &con->out_temp_ack);
31b8006e
SW
1274 }
1275
38941f80 1276 BUG_ON(list_empty(&con->out_queue));
859eb799 1277 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
c86a2930 1278 con->out_msg = m;
38941f80 1279 BUG_ON(m->con != con);
4cf9d544
SW
1280
1281 /* put message on sent list */
1282 ceph_msg_get(m);
1283 list_move_tail(&m->list_head, &con->out_sent);
31b8006e 1284
e84346b7
SW
1285 /*
1286 * only assign outgoing seq # if we haven't sent this message
 1287 * yet. if it is requeued, resend with its original seq.
1288 */
1289 if (m->needs_out_seq) {
1290 m->hdr.seq = cpu_to_le64(++con->out_seq);
1291 m->needs_out_seq = false;
98ad5ebd 1292
4690faf0
ID
1293 if (con->ops->reencode_message)
1294 con->ops->reencode_message(m);
1295 }
31b8006e 1296
98fa5dd8 1297 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
31b8006e
SW
1298 m, con->out_seq, le16_to_cpu(m->hdr.type),
1299 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
98fa5dd8 1300 m->data_length);
98ad5ebd
ID
1301 WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
1302 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
31b8006e
SW
1303
1304 /* tag + hdr + front + middle */
e2200423 1305 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
67645d76 1306 con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
e2200423 1307 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
859eb799 1308
31b8006e 1309 if (m->middle)
e2200423 1310 con_out_kvec_add(con, m->middle->vec.iov_len,
859eb799 1311 m->middle->vec.iov_base);
31b8006e 1312
67645d76 1313 /* fill in hdr crc and finalize hdr */
a9a0c51a
AE
1314 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1315 con->out_msg->hdr.crc = cpu_to_le32(crc);
67645d76 1316 memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
a9a0c51a 1317
67645d76 1318 /* fill in front and middle crc, footer */
a9a0c51a
AE
1319 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1320 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1321 if (m->middle) {
1322 crc = crc32c(0, m->middle->vec.iov_base,
1323 m->middle->vec.iov_len);
1324 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1325 } else
31b8006e 1326 con->out_msg->footer.middle_crc = 0;
739c905b 1327 dout("%s front_crc %u middle_crc %u\n", __func__,
31b8006e
SW
1328 le32_to_cpu(con->out_msg->footer.front_crc),
1329 le32_to_cpu(con->out_msg->footer.middle_crc));
67645d76 1330 con->out_msg->footer.flags = 0;
31b8006e
SW
1331
1332 /* is there a data payload? */
739c905b 1333 con->out_msg->footer.data_crc = 0;
98fa5dd8
AE
1334 if (m->data_length) {
1335 prepare_message_data(con->out_msg, m->data_length);
78625051
AE
1336 con->out_more = 1; /* data + footer will follow */
1337 } else {
31b8006e 1338 /* no, queue up footer too and be done */
859eb799 1339 prepare_write_message_footer(con);
78625051 1340 }
31b8006e 1341
c9ffc77a 1342 con_flag_set(con, CON_FLAG_WRITE_PENDING);
31b8006e
SW
1343}
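/*
 * Added summary (not in the original file): after the above, an outgoing
 * message occupies the wire roughly as
 *
 *      [tag_msg][ceph_msg_header][front][middle][data pages ...][footer]
 *
 * The tag, header, front and middle parts are queued as kvecs here; any
 * data payload is streamed page by page in write_partial_message_data();
 * and the footer is appended by prepare_write_message_footer() once the
 * data (if any) has gone out.
 */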
1344
1345/*
1346 * Prepare an ack.
1347 */
1348static void prepare_write_ack(struct ceph_connection *con)
1349{
1350 dout("prepare_write_ack %p %llu -> %llu\n", con,
1351 con->in_seq_acked, con->in_seq);
1352 con->in_seq_acked = con->in_seq;
1353
e2200423 1354 con_out_kvec_reset(con);
859eb799 1355
e2200423 1356 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
859eb799 1357
31b8006e 1358 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
e2200423 1359 con_out_kvec_add(con, sizeof (con->out_temp_ack),
859eb799
AE
1360 &con->out_temp_ack);
1361
31b8006e 1362 con->out_more = 1; /* more will follow.. eventually.. */
c9ffc77a 1363 con_flag_set(con, CON_FLAG_WRITE_PENDING);
31b8006e
SW
1364}
1365
3a23083b
SW
1366/*
1367 * Prepare to share the seq during handshake
1368 */
1369static void prepare_write_seq(struct ceph_connection *con)
1370{
1371 dout("prepare_write_seq %p %llu -> %llu\n", con,
1372 con->in_seq_acked, con->in_seq);
1373 con->in_seq_acked = con->in_seq;
1374
1375 con_out_kvec_reset(con);
1376
1377 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1378 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1379 &con->out_temp_ack);
1380
1381 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1382}
1383
31b8006e
SW
1384/*
1385 * Prepare to write keepalive byte.
1386 */
1387static void prepare_write_keepalive(struct ceph_connection *con)
1388{
1389 dout("prepare_write_keepalive %p\n", con);
e2200423 1390 con_out_kvec_reset(con);
8b9558aa 1391 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
1134e091 1392 struct timespec now;
7f61f545 1393
1134e091 1394 ktime_get_real_ts(&now);
8b9558aa 1395 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
7f61f545
ID
1396 ceph_encode_timespec(&con->out_temp_keepalive2, &now);
1397 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
1398 &con->out_temp_keepalive2);
8b9558aa
YZ
1399 } else {
1400 con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
1401 }
c9ffc77a 1402 con_flag_set(con, CON_FLAG_WRITE_PENDING);
31b8006e
SW
1403}
1404
1405/*
1406 * Connection negotiation.
1407 */
1408
dac1e716
AE
1409static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1410 int *auth_proto)
4e7a5dcd 1411{
a3530df3 1412 struct ceph_auth_handshake *auth;
b1c6b980
AE
1413
1414 if (!con->ops->get_authorizer) {
1415 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1416 con->out_connect.authorizer_len = 0;
729796be 1417 return NULL;
b1c6b980
AE
1418 }
1419
dac1e716 1420 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
a3530df3 1421 if (IS_ERR(auth))
729796be 1422 return auth;
0da5d703 1423
8f43fb53
AE
1424 con->auth_reply_buf = auth->authorizer_reply_buf;
1425 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
729796be 1426 return auth;
4e7a5dcd
SW
1427}
1428
31b8006e
SW
1429/*
1430 * We connected to a peer and are saying hello.
1431 */
e825a66d 1432static void prepare_write_banner(struct ceph_connection *con)
31b8006e 1433{
e2200423
AE
1434 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1435 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
e825a66d 1436 &con->msgr->my_enc_addr);
eed0ef2c 1437
eed0ef2c 1438 con->out_more = 0;
c9ffc77a 1439 con_flag_set(con, CON_FLAG_WRITE_PENDING);
eed0ef2c
SW
1440}
1441
e825a66d 1442static int prepare_write_connect(struct ceph_connection *con)
eed0ef2c 1443{
95c96174 1444 unsigned int global_seq = get_global_seq(con->msgr, 0);
31b8006e 1445 int proto;
dac1e716 1446 int auth_proto;
729796be 1447 struct ceph_auth_handshake *auth;
31b8006e
SW
1448
1449 switch (con->peer_name.type) {
1450 case CEPH_ENTITY_TYPE_MON:
1451 proto = CEPH_MONC_PROTOCOL;
1452 break;
1453 case CEPH_ENTITY_TYPE_OSD:
1454 proto = CEPH_OSDC_PROTOCOL;
1455 break;
1456 case CEPH_ENTITY_TYPE_MDS:
1457 proto = CEPH_MDSC_PROTOCOL;
1458 break;
1459 default:
1460 BUG();
1461 }
1462
1463 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1464 con->connect_seq, global_seq, proto);
4e7a5dcd 1465
859bff51
ID
1466 con->out_connect.features =
1467 cpu_to_le64(from_msgr(con->msgr)->supported_features);
31b8006e
SW
1468 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1469 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1470 con->out_connect.global_seq = cpu_to_le32(global_seq);
1471 con->out_connect.protocol_version = cpu_to_le32(proto);
1472 con->out_connect.flags = 0;
31b8006e 1473
dac1e716
AE
1474 auth_proto = CEPH_AUTH_UNKNOWN;
1475 auth = get_connect_authorizer(con, &auth_proto);
729796be
AE
1476 if (IS_ERR(auth))
1477 return PTR_ERR(auth);
3da54776 1478
dac1e716 1479 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
3da54776
AE
1480 con->out_connect.authorizer_len = auth ?
1481 cpu_to_le32(auth->authorizer_buf_len) : 0;
1482
e2200423 1483 con_out_kvec_add(con, sizeof (con->out_connect),
3da54776
AE
1484 &con->out_connect);
1485 if (auth && auth->authorizer_buf_len)
e2200423 1486 con_out_kvec_add(con, auth->authorizer_buf_len,
3da54776 1487 auth->authorizer_buf);
859eb799 1488
31b8006e 1489 con->out_more = 0;
c9ffc77a 1490 con_flag_set(con, CON_FLAG_WRITE_PENDING);
4e7a5dcd 1491
e10c758e 1492 return 0;
31b8006e
SW
1493}
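/*
 * Added note (not in the original file): during negotiation the client
 * first sends the banner plus its encoded address (prepare_write_banner()),
 * then the ceph_msg_connect structure and any authorizer bytes queued
 * above; the peer's side of the exchange is consumed later by
 * read_partial_banner() and read_partial_connect().
 */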
1494
31b8006e
SW
1495/*
1496 * write as much of pending kvecs to the socket as we can.
1497 * 1 -> done
1498 * 0 -> socket full, but more to do
1499 * <0 -> error
1500 */
1501static int write_partial_kvec(struct ceph_connection *con)
1502{
1503 int ret;
1504
1505 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1506 while (con->out_kvec_bytes > 0) {
1507 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1508 con->out_kvec_left, con->out_kvec_bytes,
1509 con->out_more);
1510 if (ret <= 0)
1511 goto out;
1512 con->out_kvec_bytes -= ret;
1513 if (con->out_kvec_bytes == 0)
1514 break; /* done */
f42299e6
AE
1515
1516 /* account for full iov entries consumed */
1517 while (ret >= con->out_kvec_cur->iov_len) {
1518 BUG_ON(!con->out_kvec_left);
1519 ret -= con->out_kvec_cur->iov_len;
1520 con->out_kvec_cur++;
1521 con->out_kvec_left--;
1522 }
1523 /* and for a partially-consumed entry */
1524 if (ret) {
1525 con->out_kvec_cur->iov_len -= ret;
1526 con->out_kvec_cur->iov_base += ret;
31b8006e
SW
1527 }
1528 }
1529 con->out_kvec_left = 0;
31b8006e
SW
1530 ret = 1;
1531out:
1532 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1533 con->out_kvec_bytes, con->out_kvec_left, ret);
1534 return ret; /* done! */
1535}
1536
35b62808
AE
1537static u32 ceph_crc32c_page(u32 crc, struct page *page,
1538 unsigned int page_offset,
1539 unsigned int length)
1540{
1541 char *kaddr;
1542
1543 kaddr = kmap(page);
1544 BUG_ON(kaddr == NULL);
1545 crc = crc32c(crc, kaddr + page_offset, length);
1546 kunmap(page);
1547
1548 return crc;
1549}
31b8006e
SW
1550/*
1551 * Write as much message data payload as we can. If we finish, queue
1552 * up the footer.
1553 * 1 -> done, footer is now queued in out_kvec[].
1554 * 0 -> socket full, but more to do
1555 * <0 -> error
1556 */
34d2d200 1557static int write_partial_message_data(struct ceph_connection *con)
31b8006e
SW
1558{
1559 struct ceph_msg *msg = con->out_msg;
8ae4f4f5 1560 struct ceph_msg_data_cursor *cursor = &msg->cursor;
859bff51 1561 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
f5db90bc 1562 u32 crc;
31b8006e 1563
859a35d5 1564 dout("%s %p msg %p\n", __func__, con, msg);
31b8006e 1565
5240d9f9 1566 if (list_empty(&msg->data))
4c59b4a2
AE
1567 return -EINVAL;
1568
5821bd8c
AE
1569 /*
1570 * Iterate through each page that contains data to be
1571 * written, and send as much as possible for each.
1572 *
1573 * If we are calculating the data crc (the default), we will
1574 * need to map the page. If we have no pages, they have
1575 * been revoked, so use the zero page.
1576 */
f5db90bc 1577 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
643c68a4 1578 while (cursor->resid) {
8a166d05 1579 struct page *page;
e387d525
AE
1580 size_t page_offset;
1581 size_t length;
8a166d05 1582 bool last_piece;
f5db90bc 1583 int ret;
68b4476b 1584
343128ce
SB
1585 page = ceph_msg_data_next(cursor, &page_offset, &length,
1586 &last_piece);
e387d525 1587 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
c2cfa194 1588 length, !last_piece);
f5db90bc
AE
1589 if (ret <= 0) {
1590 if (do_datacrc)
1591 msg->footer.data_crc = cpu_to_le32(crc);
31b8006e 1592
f5db90bc
AE
1593 return ret;
1594 }
143334ff
AE
1595 if (do_datacrc && cursor->need_crc)
1596 crc = ceph_crc32c_page(crc, page, page_offset, length);
1759f7b0 1597 ceph_msg_data_advance(cursor, (size_t)ret);
31b8006e
SW
1598 }
1599
34d2d200 1600 dout("%s %p msg %p done\n", __func__, con, msg);
31b8006e
SW
1601
1602 /* prepare and queue up footer, too */
f5db90bc
AE
1603 if (do_datacrc)
1604 msg->footer.data_crc = cpu_to_le32(crc);
1605 else
84ca8fc8 1606 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
e2200423 1607 con_out_kvec_reset(con);
859eb799 1608 prepare_write_message_footer(con);
f5db90bc
AE
1609
1610 return 1; /* must return > 0 to indicate success */
31b8006e
SW
1611}
1612
1613/*
1614 * write some zeros
1615 */
1616static int write_partial_skip(struct ceph_connection *con)
1617{
1618 int ret;
1619
67645d76 1620 dout("%s %p %d left\n", __func__, con, con->out_skip);
31b8006e 1621 while (con->out_skip > 0) {
09cbfeaf 1622 size_t size = min(con->out_skip, (int) PAGE_SIZE);
31b8006e 1623
e1dcb128 1624 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
31b8006e
SW
1625 if (ret <= 0)
1626 goto out;
1627 con->out_skip -= ret;
1628 }
1629 ret = 1;
1630out:
1631 return ret;
1632}
1633
1634/*
1635 * Prepare to read connection handshake, or an ack.
1636 */
eed0ef2c
SW
1637static void prepare_read_banner(struct ceph_connection *con)
1638{
1639 dout("prepare_read_banner %p\n", con);
1640 con->in_base_pos = 0;
1641}
1642
31b8006e
SW
1643static void prepare_read_connect(struct ceph_connection *con)
1644{
1645 dout("prepare_read_connect %p\n", con);
1646 con->in_base_pos = 0;
1647}
1648
1649static void prepare_read_ack(struct ceph_connection *con)
1650{
1651 dout("prepare_read_ack %p\n", con);
1652 con->in_base_pos = 0;
1653}
1654
3a23083b
SW
1655static void prepare_read_seq(struct ceph_connection *con)
1656{
1657 dout("prepare_read_seq %p\n", con);
1658 con->in_base_pos = 0;
1659 con->in_tag = CEPH_MSGR_TAG_SEQ;
1660}
1661
31b8006e
SW
1662static void prepare_read_tag(struct ceph_connection *con)
1663{
1664 dout("prepare_read_tag %p\n", con);
1665 con->in_base_pos = 0;
1666 con->in_tag = CEPH_MSGR_TAG_READY;
1667}
1668
8b9558aa
YZ
1669static void prepare_read_keepalive_ack(struct ceph_connection *con)
1670{
1671 dout("prepare_read_keepalive_ack %p\n", con);
1672 con->in_base_pos = 0;
1673}
1674
31b8006e
SW
1675/*
1676 * Prepare to read a message.
1677 */
1678static int prepare_read_message(struct ceph_connection *con)
1679{
1680 dout("prepare_read_message %p\n", con);
1681 BUG_ON(con->in_msg != NULL);
1682 con->in_base_pos = 0;
1683 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1684 return 0;
1685}
1686
1687
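/*
 * Added note (not in the original file): in read_partial() below, @end is
 * the cumulative position within con->in_base_pos space at which this
 * object finishes and @size is the object's size, so the object starts at
 * @end - @size.  Callers accumulate @end across successive calls, as
 * read_partial_banner() and read_partial_connect() do.
 */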
1688static int read_partial(struct ceph_connection *con,
fd51653f 1689 int end, int size, void *object)
31b8006e 1690{
e6cee71f
AE
1691 while (con->in_base_pos < end) {
1692 int left = end - con->in_base_pos;
31b8006e
SW
1693 int have = size - left;
1694 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1695 if (ret <= 0)
1696 return ret;
1697 con->in_base_pos += ret;
1698 }
1699 return 1;
1700}
1701
1702
1703/*
1704 * Read all or part of the connect-side handshake on a new connection
1705 */
eed0ef2c 1706static int read_partial_banner(struct ceph_connection *con)
31b8006e 1707{
fd51653f
AE
1708 int size;
1709 int end;
1710 int ret;
31b8006e 1711
eed0ef2c 1712 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
31b8006e
SW
1713
1714 /* peer's banner */
fd51653f
AE
1715 size = strlen(CEPH_BANNER);
1716 end = size;
1717 ret = read_partial(con, end, size, con->in_banner);
31b8006e
SW
1718 if (ret <= 0)
1719 goto out;
fd51653f
AE
1720
1721 size = sizeof (con->actual_peer_addr);
1722 end += size;
1723 ret = read_partial(con, end, size, &con->actual_peer_addr);
31b8006e
SW
1724 if (ret <= 0)
1725 goto out;
fd51653f
AE
1726
1727 size = sizeof (con->peer_addr_for_me);
1728 end += size;
1729 ret = read_partial(con, end, size, &con->peer_addr_for_me);
31b8006e
SW
1730 if (ret <= 0)
1731 goto out;
fd51653f 1732
eed0ef2c
SW
1733out:
1734 return ret;
1735}
1736
1737static int read_partial_connect(struct ceph_connection *con)
1738{
fd51653f
AE
1739 int size;
1740 int end;
1741 int ret;
eed0ef2c
SW
1742
1743 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1744
fd51653f
AE
1745 size = sizeof (con->in_reply);
1746 end = size;
1747 ret = read_partial(con, end, size, &con->in_reply);
31b8006e
SW
1748 if (ret <= 0)
1749 goto out;
fd51653f
AE
1750
1751 size = le32_to_cpu(con->in_reply.authorizer_len);
1752 end += size;
1753 ret = read_partial(con, end, size, con->auth_reply_buf);
4e7a5dcd
SW
1754 if (ret <= 0)
1755 goto out;
31b8006e 1756
4e7a5dcd
SW
1757 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1758 con, (int)con->in_reply.tag,
1759 le32_to_cpu(con->in_reply.connect_seq),
31b8006e
SW
1760 le32_to_cpu(con->in_reply.global_seq));
1761out:
1762 return ret;
eed0ef2c 1763
31b8006e
SW
1764}
1765
1766/*
1767 * Verify the hello banner looks okay.
1768 */
1769static int verify_hello(struct ceph_connection *con)
1770{
1771 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
13e38c8a 1772 pr_err("connect to %s got bad banner\n",
3d14c5d2 1773 ceph_pr_addr(&con->peer_addr.in_addr));
31b8006e
SW
1774 con->error_msg = "protocol error, bad banner";
1775 return -1;
1776 }
1777 return 0;
1778}
1779
1780static bool addr_is_blank(struct sockaddr_storage *ss)
1781{
c44bd69c
ID
1782 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
1783 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
1784
31b8006e
SW
1785 switch (ss->ss_family) {
1786 case AF_INET:
c44bd69c 1787 return addr->s_addr == htonl(INADDR_ANY);
31b8006e 1788 case AF_INET6:
c44bd69c
ID
1789 return ipv6_addr_any(addr6);
1790 default:
1791 return true;
31b8006e 1792 }
31b8006e
SW
1793}
1794
1795static int addr_port(struct sockaddr_storage *ss)
1796{
1797 switch (ss->ss_family) {
1798 case AF_INET:
f28bcfbe 1799 return ntohs(((struct sockaddr_in *)ss)->sin_port);
31b8006e 1800 case AF_INET6:
f28bcfbe 1801 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
31b8006e
SW
1802 }
1803 return 0;
1804}
1805
1806static void addr_set_port(struct sockaddr_storage *ss, int p)
1807{
1808 switch (ss->ss_family) {
1809 case AF_INET:
1810 ((struct sockaddr_in *)ss)->sin_port = htons(p);
a2a79609 1811 break;
31b8006e
SW
1812 case AF_INET6:
1813 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
a2a79609 1814 break;
31b8006e
SW
1815 }
1816}
1817
ee3b56f2
NW
1818/*
1819 * Unlike other *_pton function semantics, zero indicates success.
1820 */
1821static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1822 char delim, const char **ipend)
1823{
99f0f3b2
AE
1824 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1825 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
ee3b56f2
NW
1826
1827 memset(ss, 0, sizeof(*ss));
1828
1829 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1830 ss->ss_family = AF_INET;
1831 return 0;
1832 }
1833
1834 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1835 ss->ss_family = AF_INET6;
1836 return 0;
1837 }
1838
1839 return -EINVAL;
1840}
1841
1842/*
1843 * Extract hostname string and resolve using kernel DNS facility.
1844 */
1845#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1846static int ceph_dns_resolve_name(const char *name, size_t namelen,
1847 struct sockaddr_storage *ss, char delim, const char **ipend)
1848{
1849 const char *end, *delim_p;
1850 char *colon_p, *ip_addr = NULL;
1851 int ip_len, ret;
1852
        /*
         * The hostname ends immediately before the delimiter or the port
         * marker (':'), whichever occurs first.
         */
1857 delim_p = memchr(name, delim, namelen);
1858 colon_p = memchr(name, ':', namelen);
1859
1860 if (delim_p && colon_p)
1861 end = delim_p < colon_p ? delim_p : colon_p;
1862 else if (!delim_p && colon_p)
1863 end = colon_p;
1864 else {
1865 end = delim_p;
1866 if (!end) /* case: hostname:/ */
1867 end = name + namelen;
1868 }
1869
1870 if (end <= name)
1871 return -EINVAL;
1872
1873 /* do dns_resolve upcall */
1874 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1875 if (ip_len > 0)
1876 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1877 else
1878 ret = -ESRCH;
1879
1880 kfree(ip_addr);
1881
1882 *ipend = end;
1883
1884 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1885 ret, ret ? "failed" : ceph_pr_addr(ss));
1886
1887 return ret;
1888}
1889#else
1890static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1891 struct sockaddr_storage *ss, char delim, const char **ipend)
1892{
1893 return -EINVAL;
1894}
1895#endif
1896
1897/*
1898 * Parse a server name (IP or hostname). If a valid IP address is not found
1899 * then try to extract a hostname to resolve using userspace DNS upcall.
1900 */
1901static int ceph_parse_server_name(const char *name, size_t namelen,
1902 struct sockaddr_storage *ss, char delim, const char **ipend)
1903{
1904 int ret;
1905
1906 ret = ceph_pton(name, namelen, ss, delim, ipend);
1907 if (ret)
1908 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1909
1910 return ret;
1911}
1912
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
                   struct ceph_entity_addr *addr,
                   int max_count, int *count)
{
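        /*
         * Each comma-separated entry handled below may be "ip", "ip:port",
         * "[ipv6]", "[ipv6]:port" or, when the DNS resolver is available,
         * a hostname passed to ceph_parse_server_name(); a missing or zero
         * port falls back to CEPH_MON_PORT.
         */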
ee3b56f2 1921 int i, ret = -EINVAL;
31b8006e
SW
1922 const char *p = c;
1923
1924 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1925 for (i = 0; i < max_count; i++) {
1926 const char *ipend;
1927 struct sockaddr_storage *ss = &addr[i].in_addr;
31b8006e 1928 int port;
39139f64
SW
1929 char delim = ',';
1930
1931 if (*p == '[') {
1932 delim = ']';
1933 p++;
1934 }
31b8006e 1935
ee3b56f2
NW
1936 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1937 if (ret)
31b8006e 1938 goto bad;
ee3b56f2
NW
1939 ret = -EINVAL;
1940
31b8006e
SW
1941 p = ipend;
1942
39139f64
SW
1943 if (delim == ']') {
1944 if (*p != ']') {
1945 dout("missing matching ']'\n");
1946 goto bad;
1947 }
1948 p++;
1949 }
1950
31b8006e
SW
1951 /* port? */
1952 if (p < end && *p == ':') {
1953 port = 0;
1954 p++;
1955 while (p < end && *p >= '0' && *p <= '9') {
1956 port = (port * 10) + (*p - '0');
1957 p++;
1958 }
f48db1e9
ID
1959 if (port == 0)
1960 port = CEPH_MON_PORT;
1961 else if (port > 65535)
31b8006e
SW
1962 goto bad;
1963 } else {
1964 port = CEPH_MON_PORT;
1965 }
1966
1967 addr_set_port(ss, port);
1968
3d14c5d2 1969 dout("parse_ips got %s\n", ceph_pr_addr(ss));
31b8006e
SW
1970
1971 if (p == end)
1972 break;
1973 if (*p != ',')
1974 goto bad;
1975 p++;
1976 }
1977
1978 if (p != end)
1979 goto bad;
1980
1981 if (count)
1982 *count = i + 1;
1983 return 0;
1984
1985bad:
39139f64 1986 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
ee3b56f2 1987 return ret;
31b8006e 1988}
3d14c5d2 1989EXPORT_SYMBOL(ceph_parse_ips);
31b8006e 1990
eed0ef2c 1991static int process_banner(struct ceph_connection *con)
31b8006e 1992{
eed0ef2c 1993 dout("process_banner on %p\n", con);
31b8006e
SW
1994
1995 if (verify_hello(con) < 0)
1996 return -1;
1997
63f2d211
SW
1998 ceph_decode_addr(&con->actual_peer_addr);
1999 ceph_decode_addr(&con->peer_addr_for_me);
2000
        /*
         * Make sure the other end is who we wanted.  Note that the other
         * end may not yet know their IP address, so if it's 0.0.0.0, give
         * them the benefit of the doubt.
         */
103e2d3a
SW
2006 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
2007 sizeof(con->peer_addr)) != 0 &&
31b8006e
SW
2008 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
2009 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
b9a67899
JP
2010 pr_warn("wrong peer, want %s/%d, got %s/%d\n",
2011 ceph_pr_addr(&con->peer_addr.in_addr),
2012 (int)le32_to_cpu(con->peer_addr.nonce),
2013 ceph_pr_addr(&con->actual_peer_addr.in_addr),
2014 (int)le32_to_cpu(con->actual_peer_addr.nonce));
58bb3b37 2015 con->error_msg = "wrong peer at address";
31b8006e
SW
2016 return -1;
2017 }
2018
2019 /*
2020 * did we learn our address?
2021 */
2022 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
2023 int port = addr_port(&con->msgr->inst.addr.in_addr);
2024
2025 memcpy(&con->msgr->inst.addr.in_addr,
2026 &con->peer_addr_for_me.in_addr,
2027 sizeof(con->peer_addr_for_me.in_addr));
2028 addr_set_port(&con->msgr->inst.addr.in_addr, port);
63f2d211 2029 encode_my_addr(con->msgr);
eed0ef2c 2030 dout("process_banner learned my addr is %s\n",
3d14c5d2 2031 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
31b8006e
SW
2032 }
2033
eed0ef2c
SW
2034 return 0;
2035}
2036
2037static int process_connect(struct ceph_connection *con)
2038{
859bff51
ID
2039 u64 sup_feat = from_msgr(con->msgr)->supported_features;
2040 u64 req_feat = from_msgr(con->msgr)->required_features;
dcbbd97c 2041 u64 server_feat = le64_to_cpu(con->in_reply.features);
0da5d703 2042 int ret;
04a419f9 2043
eed0ef2c
SW
2044 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2045
5c056fdc
ID
2046 if (con->auth_reply_buf) {
2047 /*
2048 * Any connection that defines ->get_authorizer()
2049 * should also define ->verify_authorizer_reply().
2050 * See get_connect_authorizer().
2051 */
0dde5848 2052 ret = con->ops->verify_authorizer_reply(con);
5c056fdc
ID
2053 if (ret < 0) {
2054 con->error_msg = "bad authorize reply";
2055 return ret;
2056 }
2057 }
2058
31b8006e 2059 switch (con->in_reply.tag) {
04a419f9
SW
2060 case CEPH_MSGR_TAG_FEATURES:
2061 pr_err("%s%lld %s feature set mismatch,"
2062 " my %llx < server's %llx, missing %llx\n",
2063 ENTITY_NAME(con->peer_name),
3d14c5d2 2064 ceph_pr_addr(&con->peer_addr.in_addr),
04a419f9
SW
2065 sup_feat, server_feat, server_feat & ~sup_feat);
2066 con->error_msg = "missing required protocol features";
0fa6ebc6 2067 reset_connection(con);
04a419f9
SW
2068 return -1;
2069
31b8006e 2070 case CEPH_MSGR_TAG_BADPROTOVER:
31b8006e
SW
2071 pr_err("%s%lld %s protocol version mismatch,"
2072 " my %d != server's %d\n",
2073 ENTITY_NAME(con->peer_name),
3d14c5d2 2074 ceph_pr_addr(&con->peer_addr.in_addr),
31b8006e
SW
2075 le32_to_cpu(con->out_connect.protocol_version),
2076 le32_to_cpu(con->in_reply.protocol_version));
2077 con->error_msg = "protocol version mismatch";
0fa6ebc6 2078 reset_connection(con);
31b8006e
SW
2079 return -1;
2080
4e7a5dcd
SW
2081 case CEPH_MSGR_TAG_BADAUTHORIZER:
2082 con->auth_retry++;
2083 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2084 con->auth_retry);
2085 if (con->auth_retry == 2) {
2086 con->error_msg = "connect authorization failure";
4e7a5dcd
SW
2087 return -1;
2088 }
6d4221b5 2089 con_out_kvec_reset(con);
e825a66d 2090 ret = prepare_write_connect(con);
0da5d703
SW
2091 if (ret < 0)
2092 return ret;
63733a0f 2093 prepare_read_connect(con);
4e7a5dcd 2094 break;
31b8006e
SW
2095
2096 case CEPH_MSGR_TAG_RESETSESSION:
                /*
                 * If we connected with a large connect_seq but the peer
                 * has no record of a session with us (no connection, or
                 * connect_seq == 0), they will send RESETSESSION to
                 * indicate that they must have reset their session, and
                 * may have dropped messages.
                 */
2104 dout("process_connect got RESET peer seq %u\n",
5bdca4e0 2105 le32_to_cpu(con->in_reply.connect_seq));
31b8006e
SW
2106 pr_err("%s%lld %s connection reset\n",
2107 ENTITY_NAME(con->peer_name),
3d14c5d2 2108 ceph_pr_addr(&con->peer_addr.in_addr));
31b8006e 2109 reset_connection(con);
6d4221b5 2110 con_out_kvec_reset(con);
5a0f8fdd
AE
2111 ret = prepare_write_connect(con);
2112 if (ret < 0)
2113 return ret;
31b8006e
SW
2114 prepare_read_connect(con);
2115
2116 /* Tell ceph about it. */
ec302645 2117 mutex_unlock(&con->mutex);
31b8006e
SW
2118 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2119 if (con->ops->peer_reset)
2120 con->ops->peer_reset(con);
ec302645 2121 mutex_lock(&con->mutex);
8dacc7da 2122 if (con->state != CON_STATE_NEGOTIATING)
0da5d703 2123 return -EAGAIN;
31b8006e
SW
2124 break;
2125
2126 case CEPH_MSGR_TAG_RETRY_SESSION:
2127 /*
2128 * If we sent a smaller connect_seq than the peer has, try
2129 * again with a larger value.
2130 */
5bdca4e0 2131 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
31b8006e 2132 le32_to_cpu(con->out_connect.connect_seq),
5bdca4e0
SW
2133 le32_to_cpu(con->in_reply.connect_seq));
2134 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
6d4221b5 2135 con_out_kvec_reset(con);
5a0f8fdd
AE
2136 ret = prepare_write_connect(con);
2137 if (ret < 0)
2138 return ret;
31b8006e
SW
2139 prepare_read_connect(con);
2140 break;
2141
2142 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2143 /*
2144 * If we sent a smaller global_seq than the peer has, try
2145 * again with a larger value.
2146 */
eed0ef2c 2147 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
31b8006e 2148 con->peer_global_seq,
5bdca4e0 2149 le32_to_cpu(con->in_reply.global_seq));
31b8006e 2150 get_global_seq(con->msgr,
5bdca4e0 2151 le32_to_cpu(con->in_reply.global_seq));
6d4221b5 2152 con_out_kvec_reset(con);
5a0f8fdd
AE
2153 ret = prepare_write_connect(con);
2154 if (ret < 0)
2155 return ret;
31b8006e
SW
2156 prepare_read_connect(con);
2157 break;
2158
3a23083b 2159 case CEPH_MSGR_TAG_SEQ:
31b8006e 2160 case CEPH_MSGR_TAG_READY:
04a419f9
SW
2161 if (req_feat & ~server_feat) {
2162 pr_err("%s%lld %s protocol feature mismatch,"
2163 " my required %llx > server's %llx, need %llx\n",
2164 ENTITY_NAME(con->peer_name),
3d14c5d2 2165 ceph_pr_addr(&con->peer_addr.in_addr),
04a419f9
SW
2166 req_feat, server_feat, req_feat & ~server_feat);
2167 con->error_msg = "missing required protocol features";
0fa6ebc6 2168 reset_connection(con);
04a419f9
SW
2169 return -1;
2170 }
8dacc7da 2171
122070a2 2172 WARN_ON(con->state != CON_STATE_NEGOTIATING);
8dacc7da 2173 con->state = CON_STATE_OPEN;
20e55c4c 2174 con->auth_retry = 0; /* we authenticated; clear flag */
31b8006e
SW
2175 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2176 con->connect_seq++;
aba558e2 2177 con->peer_features = server_feat;
31b8006e
SW
2178 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2179 con->peer_global_seq,
2180 le32_to_cpu(con->in_reply.connect_seq),
2181 con->connect_seq);
2182 WARN_ON(con->connect_seq !=
2183 le32_to_cpu(con->in_reply.connect_seq));
92ac41d0
SW
2184
2185 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
c9ffc77a 2186 con_flag_set(con, CON_FLAG_LOSSYTX);
92ac41d0 2187
85effe18 2188 con->delay = 0; /* reset backoff memory */
92ac41d0 2189
3a23083b
SW
2190 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2191 prepare_write_seq(con);
2192 prepare_read_seq(con);
2193 } else {
2194 prepare_read_tag(con);
2195 }
31b8006e
SW
2196 break;
2197
2198 case CEPH_MSGR_TAG_WAIT:
2199 /*
2200 * If there is a connection race (we are opening
2201 * connections to each other), one of us may just have
2202 * to WAIT. This shouldn't happen if we are the
2203 * client.
2204 */
04177882
SW
2205 con->error_msg = "protocol error, got WAIT as client";
2206 return -1;
31b8006e
SW
2207
2208 default:
31b8006e
SW
2209 con->error_msg = "protocol error, garbage tag during connect";
2210 return -1;
2211 }
2212 return 0;
2213}
2214
2215
2216/*
2217 * read (part of) an ack
2218 */
2219static int read_partial_ack(struct ceph_connection *con)
2220{
fd51653f
AE
2221 int size = sizeof (con->in_temp_ack);
2222 int end = size;
31b8006e 2223
fd51653f 2224 return read_partial(con, end, size, &con->in_temp_ack);
31b8006e
SW
2225}
2226
31b8006e
SW
2227/*
2228 * We can finally discard anything that's been acked.
2229 */
2230static void process_ack(struct ceph_connection *con)
2231{
2232 struct ceph_msg *m;
2233 u64 ack = le64_to_cpu(con->in_temp_ack);
2234 u64 seq;
0a2ad541
YZ
2235 bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
2236 struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
31b8006e 2237
0a2ad541
YZ
2238 /*
2239 * In the reconnect case, con_fault() has requeued messages
2240 * in out_sent. We should cleanup old messages according to
2241 * the reconnect seq.
2242 */
2243 while (!list_empty(list)) {
2244 m = list_first_entry(list, struct ceph_msg, list_head);
2245 if (reconnect && m->needs_out_seq)
2246 break;
31b8006e
SW
2247 seq = le64_to_cpu(m->hdr.seq);
2248 if (seq > ack)
2249 break;
2250 dout("got ack for seq %llu type %d at %p\n", seq,
2251 le16_to_cpu(m->hdr.type), m);
4cf9d544 2252 m->ack_stamp = jiffies;
31b8006e
SW
2253 ceph_msg_remove(m);
2254 }
0a2ad541 2255
31b8006e
SW
2256 prepare_read_tag(con);
2257}
2258
2259
2450418c 2260static int read_partial_message_section(struct ceph_connection *con,
213c99ee
SW
2261 struct kvec *section,
2262 unsigned int sec_len, u32 *crc)
2450418c 2263{
68b4476b 2264 int ret, left;
2450418c
YS
2265
2266 BUG_ON(!section);
2267
2268 while (section->iov_len < sec_len) {
2269 BUG_ON(section->iov_base == NULL);
2270 left = sec_len - section->iov_len;
2271 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2272 section->iov_len, left);
2273 if (ret <= 0)
2274 return ret;
2275 section->iov_len += ret;
2450418c 2276 }
fe3ad593
AE
2277 if (section->iov_len == sec_len)
2278 *crc = crc32c(0, section->iov_base, section->iov_len);
31b8006e 2279
2450418c
YS
2280 return 1;
2281}
31b8006e 2282
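/*
 * The data payload usually arrives across many socket reads; the
 * running crc32c is parked in con->in_data_crc between calls so each
 * partial read resumes the checksum where the previous one stopped.
 */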
static int read_partial_msg_data(struct ceph_connection *con)
{
2285 struct ceph_msg *msg = con->in_msg;
8ae4f4f5 2286 struct ceph_msg_data_cursor *cursor = &msg->cursor;
859bff51 2287 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
686be208
AE
2288 struct page *page;
2289 size_t page_offset;
2290 size_t length;
f5db90bc 2291 u32 crc = 0;
34d2d200
AE
2292 int ret;
2293
2294 BUG_ON(!msg);
5240d9f9 2295 if (list_empty(&msg->data))
4c59b4a2 2296 return -EIO;
34d2d200 2297
f5db90bc
AE
2298 if (do_datacrc)
2299 crc = con->in_data_crc;
643c68a4 2300 while (cursor->resid) {
343128ce 2301 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
686be208 2302 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
f5db90bc
AE
2303 if (ret <= 0) {
2304 if (do_datacrc)
2305 con->in_data_crc = crc;
2306
686be208 2307 return ret;
f5db90bc 2308 }
686be208
AE
2309
2310 if (do_datacrc)
f5db90bc 2311 crc = ceph_crc32c_page(crc, page, page_offset, ret);
1759f7b0 2312 ceph_msg_data_advance(cursor, (size_t)ret);
34d2d200 2313 }
f5db90bc
AE
2314 if (do_datacrc)
2315 con->in_data_crc = crc;
34d2d200
AE
2316
2317 return 1; /* must return > 0 to indicate success */
2318}
2319
31b8006e
SW
/*
 * read (part of) a message.
 */
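/*
 * When an incoming message must be dropped (stale seq#, or alloc_msg
 * asked us to skip it), in_base_pos is set to minus the number of
 * payload bytes still on the wire; try_read() then reads and discards
 * that many bytes to stay aligned before looking for the next tag.
 */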
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2324
31b8006e
SW
2325static int read_partial_message(struct ceph_connection *con)
2326{
2327 struct ceph_msg *m = con->in_msg;
fd51653f
AE
2328 int size;
2329 int end;
31b8006e 2330 int ret;
95c96174 2331 unsigned int front_len, middle_len, data_len;
859bff51 2332 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
33d07337 2333 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
ae18756b 2334 u64 seq;
fe3ad593 2335 u32 crc;
31b8006e
SW
2336
2337 dout("read_partial_message con %p msg %p\n", con, m);
2338
2339 /* header */
fd51653f
AE
2340 size = sizeof (con->in_hdr);
2341 end = size;
2342 ret = read_partial(con, end, size, &con->in_hdr);
57dac9d1
AE
2343 if (ret <= 0)
2344 return ret;
fe3ad593
AE
2345
2346 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2347 if (cpu_to_le32(crc) != con->in_hdr.crc) {
67c64eb7 2348 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
fe3ad593
AE
2349 crc, con->in_hdr.crc);
2350 return -EBADMSG;
2351 }
2352
31b8006e
SW
2353 front_len = le32_to_cpu(con->in_hdr.front_len);
2354 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2355 return -EIO;
2356 middle_len = le32_to_cpu(con->in_hdr.middle_len);
7b11ba37 2357 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
31b8006e
SW
2358 return -EIO;
2359 data_len = le32_to_cpu(con->in_hdr.data_len);
2360 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2361 return -EIO;
2362
ae18756b
SW
2363 /* verify seq# */
2364 seq = le64_to_cpu(con->in_hdr.seq);
2365 if ((s64)seq - (s64)con->in_seq < 1) {
df9f86fa 2366 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
ae18756b 2367 ENTITY_NAME(con->peer_name),
3d14c5d2 2368 ceph_pr_addr(&con->peer_addr.in_addr),
ae18756b
SW
2369 seq, con->in_seq + 1);
2370 con->in_base_pos = -front_len - middle_len - data_len -
dbc0d3ca 2371 sizeof_footer(con);
ae18756b 2372 con->in_tag = CEPH_MSGR_TAG_READY;
e7a88e82 2373 return 1;
ae18756b
SW
2374 } else if ((s64)seq - (s64)con->in_seq > 1) {
2375 pr_err("read_partial_message bad seq %lld expected %lld\n",
2376 seq, con->in_seq + 1);
2377 con->error_msg = "bad message sequence # for incoming message";
67c64eb7 2378 return -EBADE;
ae18756b
SW
2379 }
2380
31b8006e
SW
2381 /* allocate message? */
2382 if (!con->in_msg) {
4740a623
SW
2383 int skip = 0;
2384
31b8006e 2385 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
6ebc8b32 2386 front_len, data_len);
4740a623
SW
2387 ret = ceph_con_in_msg_alloc(con, &skip);
2388 if (ret < 0)
2389 return ret;
f759ebb9
AE
2390
2391 BUG_ON(!con->in_msg ^ skip);
2450418c 2392 if (skip) {
31b8006e 2393 /* skip this message */
a79832f2 2394 dout("alloc_msg said skip message\n");
31b8006e 2395 con->in_base_pos = -front_len - middle_len - data_len -
dbc0d3ca 2396 sizeof_footer(con);
31b8006e 2397 con->in_tag = CEPH_MSGR_TAG_READY;
684be25c 2398 con->in_seq++;
e7a88e82 2399 return 1;
31b8006e 2400 }
38941f80 2401
4740a623 2402 BUG_ON(!con->in_msg);
38941f80 2403 BUG_ON(con->in_msg->con != con);
31b8006e
SW
2404 m = con->in_msg;
2405 m->front.iov_len = 0; /* haven't read it yet */
2450418c
YS
2406 if (m->middle)
2407 m->middle->vec.iov_len = 0;
9d7f0f13 2408
78625051 2409 /* prepare for data payload, if any */
a4107026 2410
78625051 2411 if (data_len)
98fa5dd8 2412 prepare_message_data(con->in_msg, data_len);
31b8006e
SW
2413 }
2414
2415 /* front */
2450418c
YS
2416 ret = read_partial_message_section(con, &m->front, front_len,
2417 &con->in_front_crc);
2418 if (ret <= 0)
2419 return ret;
31b8006e
SW
2420
2421 /* middle */
2450418c 2422 if (m->middle) {
213c99ee
SW
2423 ret = read_partial_message_section(con, &m->middle->vec,
2424 middle_len,
2450418c 2425 &con->in_middle_crc);
31b8006e
SW
2426 if (ret <= 0)
2427 return ret;
31b8006e
SW
2428 }
2429
2430 /* (page) data */
34d2d200
AE
2431 if (data_len) {
2432 ret = read_partial_msg_data(con);
2433 if (ret <= 0)
2434 return ret;
31b8006e
SW
2435 }
2436
31b8006e 2437 /* footer */
89f08173 2438 size = sizeof_footer(con);
fd51653f
AE
2439 end += size;
2440 ret = read_partial(con, end, size, &m->footer);
57dac9d1
AE
2441 if (ret <= 0)
2442 return ret;
2443
33d07337
YZ
2444 if (!need_sign) {
2445 m->footer.flags = m->old_footer.flags;
2446 m->footer.sig = 0;
2447 }
2448
31b8006e
SW
2449 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2450 m, front_len, m->footer.front_crc, middle_len,
2451 m->footer.middle_crc, data_len, m->footer.data_crc);
2452
2453 /* crc ok? */
2454 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2455 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2456 m, con->in_front_crc, m->footer.front_crc);
2457 return -EBADMSG;
2458 }
2459 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2460 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2461 m, con->in_middle_crc, m->footer.middle_crc);
2462 return -EBADMSG;
2463 }
bca064d2 2464 if (do_datacrc &&
31b8006e
SW
2465 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2466 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2467 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2468 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2469 return -EBADMSG;
2470 }
2471
33d07337 2472 if (need_sign && con->ops->check_message_signature &&
79dbd1ba 2473 con->ops->check_message_signature(m)) {
33d07337
YZ
2474 pr_err("read_partial_message %p signature check failed\n", m);
2475 return -EBADMSG;
2476 }
2477
31b8006e
SW
2478 return 1; /* done! */
2479}
2480
2481/*
2482 * Process message. This happens in the worker thread. The callback should
2483 * be careful not to do anything that waits on other incoming messages or it
2484 * may deadlock.
2485 */
2486static void process_message(struct ceph_connection *con)
2487{
583d0fef 2488 struct ceph_msg *msg = con->in_msg;
31b8006e 2489
38941f80 2490 BUG_ON(con->in_msg->con != con);
31b8006e
SW
2491 con->in_msg = NULL;
2492
2493 /* if first message, set peer_name */
2494 if (con->peer_name.type == 0)
dbad185d 2495 con->peer_name = msg->hdr.src;
31b8006e 2496
31b8006e 2497 con->in_seq++;
ec302645 2498 mutex_unlock(&con->mutex);
31b8006e
SW
2499
2500 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2501 msg, le64_to_cpu(msg->hdr.seq),
dbad185d 2502 ENTITY_NAME(msg->hdr.src),
31b8006e
SW
2503 le16_to_cpu(msg->hdr.type),
2504 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2505 le32_to_cpu(msg->hdr.front_len),
2506 le32_to_cpu(msg->hdr.data_len),
2507 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2508 con->ops->dispatch(con, msg);
ec302645
SW
2509
2510 mutex_lock(&con->mutex);
31b8006e
SW
2511}
2512
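/*
 * A KEEPALIVE2 ack carries the peer's timestamp; it is remembered in
 * con->last_keepalive_ack so that ceph_con_keepalive_expired() can
 * tell whether the peer has answered a keepalive recently enough.
 */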
static int read_keepalive_ack(struct ceph_connection *con)
{
        struct ceph_timespec ceph_ts;
        size_t size = sizeof(ceph_ts);
        int ret = read_partial(con, size, size, &ceph_ts);
        if (ret <= 0)
                return ret;
        ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
        prepare_read_tag(con);
        return 1;
}
31b8006e
SW
2524
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writable and we have something ready to send.
 */
2529static int try_write(struct ceph_connection *con)
2530{
31b8006e
SW
2531 int ret = 1;
2532
d59315ca 2533 dout("try_write start %p state %lu\n", con, con->state);
b252bc18
ID
2534 if (con->state != CON_STATE_PREOPEN &&
2535 con->state != CON_STATE_CONNECTING &&
2536 con->state != CON_STATE_NEGOTIATING &&
2537 con->state != CON_STATE_OPEN)
2538 return 0;
31b8006e 2539
31b8006e
SW
2540more:
2541 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2542
2543 /* open the socket first? */
8dacc7da
SW
2544 if (con->state == CON_STATE_PREOPEN) {
2545 BUG_ON(con->sock);
2546 con->state = CON_STATE_CONNECTING;
a5988c49 2547
e2200423 2548 con_out_kvec_reset(con);
e825a66d 2549 prepare_write_banner(con);
eed0ef2c 2550 prepare_read_banner(con);
31b8006e 2551
cf3e5c40 2552 BUG_ON(con->in_msg);
31b8006e
SW
2553 con->in_tag = CEPH_MSGR_TAG_READY;
2554 dout("try_write initiating connect on %p new state %lu\n",
2555 con, con->state);
41617d0c
AE
2556 ret = ceph_tcp_connect(con);
2557 if (ret < 0) {
31b8006e 2558 con->error_msg = "connect error";
31b8006e
SW
2559 goto out;
2560 }
2561 }
2562
2563more_kvec:
b252bc18
ID
2564 BUG_ON(!con->sock);
2565
31b8006e 2566 /* kvec data queued? */
67645d76
ID
2567 if (con->out_kvec_left) {
2568 ret = write_partial_kvec(con);
31b8006e 2569 if (ret <= 0)
42961d23 2570 goto out;
31b8006e 2571 }
67645d76
ID
2572 if (con->out_skip) {
2573 ret = write_partial_skip(con);
31b8006e 2574 if (ret <= 0)
42961d23 2575 goto out;
31b8006e
SW
2576 }
2577
2578 /* msg pages? */
2579 if (con->out_msg) {
c86a2930
SW
2580 if (con->out_msg_done) {
2581 ceph_msg_put(con->out_msg);
2582 con->out_msg = NULL; /* we're done with this one */
2583 goto do_next;
2584 }
2585
34d2d200 2586 ret = write_partial_message_data(con);
31b8006e
SW
2587 if (ret == 1)
2588 goto more_kvec; /* we need to send the footer, too! */
2589 if (ret == 0)
42961d23 2590 goto out;
31b8006e 2591 if (ret < 0) {
34d2d200 2592 dout("try_write write_partial_message_data err %d\n",
31b8006e 2593 ret);
42961d23 2594 goto out;
31b8006e
SW
2595 }
2596 }
2597
c86a2930 2598do_next:
8dacc7da 2599 if (con->state == CON_STATE_OPEN) {
8b9558aa
YZ
2600 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2601 prepare_write_keepalive(con);
2602 goto more;
2603 }
31b8006e
SW
2604 /* is anything else pending? */
2605 if (!list_empty(&con->out_queue)) {
2606 prepare_write_message(con);
2607 goto more;
2608 }
2609 if (con->in_seq > con->in_seq_acked) {
2610 prepare_write_ack(con);
2611 goto more;
2612 }
31b8006e
SW
2613 }
2614
2615 /* Nothing to do! */
c9ffc77a 2616 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
31b8006e 2617 dout("try_write nothing else to write.\n");
31b8006e
SW
2618 ret = 0;
2619out:
42961d23 2620 dout("try_write done on %p ret %d\n", con, ret);
31b8006e
SW
2621 return ret;
2622}
2623
2624
2625
2626/*
2627 * Read what we can from the socket.
2628 */
2629static int try_read(struct ceph_connection *con)
2630{
31b8006e
SW
2631 int ret = -1;
2632
8dacc7da
SW
2633more:
2634 dout("try_read start on %p state %lu\n", con, con->state);
2635 if (con->state != CON_STATE_CONNECTING &&
2636 con->state != CON_STATE_NEGOTIATING &&
2637 con->state != CON_STATE_OPEN)
31b8006e
SW
2638 return 0;
2639
8dacc7da 2640 BUG_ON(!con->sock);
ec302645 2641
31b8006e
SW
2642 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2643 con->in_base_pos);
0da5d703 2644
8dacc7da 2645 if (con->state == CON_STATE_CONNECTING) {
7593af92
AE
2646 dout("try_read connecting\n");
2647 ret = read_partial_banner(con);
2648 if (ret <= 0)
ab166d5a 2649 goto out;
7593af92
AE
2650 ret = process_banner(con);
2651 if (ret < 0)
2652 goto out;
2653
8dacc7da 2654 con->state = CON_STATE_NEGOTIATING;
7593af92 2655
6d4221b5
JS
2656 /*
2657 * Received banner is good, exchange connection info.
2658 * Do not reset out_kvec, as sending our banner raced
2659 * with receiving peer banner after connect completed.
2660 */
7593af92
AE
2661 ret = prepare_write_connect(con);
2662 if (ret < 0)
2663 goto out;
2664 prepare_read_connect(con);
2665
2666 /* Send connection info before awaiting response */
0da5d703
SW
2667 goto out;
2668 }
2669
8dacc7da 2670 if (con->state == CON_STATE_NEGOTIATING) {
7593af92 2671 dout("try_read negotiating\n");
31b8006e
SW
2672 ret = read_partial_connect(con);
2673 if (ret <= 0)
31b8006e 2674 goto out;
98bdb0aa
SW
2675 ret = process_connect(con);
2676 if (ret < 0)
2677 goto out;
31b8006e
SW
2678 goto more;
2679 }
2680
122070a2 2681 WARN_ON(con->state != CON_STATE_OPEN);
8dacc7da 2682
31b8006e
SW
2683 if (con->in_base_pos < 0) {
2684 /*
2685 * skipping + discarding content.
2686 *
2687 * FIXME: there must be a better way to do this!
2688 */
84495f49
AE
2689 static char buf[SKIP_BUF_SIZE];
2690 int skip = min((int) sizeof (buf), -con->in_base_pos);
2691
31b8006e
SW
2692 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2693 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2694 if (ret <= 0)
98bdb0aa 2695 goto out;
31b8006e
SW
2696 con->in_base_pos += ret;
2697 if (con->in_base_pos)
2698 goto more;
2699 }
2700 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2701 /*
2702 * what's next?
2703 */
2704 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2705 if (ret <= 0)
98bdb0aa 2706 goto out;
31b8006e
SW
2707 dout("try_read got tag %d\n", (int)con->in_tag);
2708 switch (con->in_tag) {
2709 case CEPH_MSGR_TAG_MSG:
2710 prepare_read_message(con);
2711 break;
2712 case CEPH_MSGR_TAG_ACK:
2713 prepare_read_ack(con);
2714 break;
8b9558aa
YZ
2715 case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
2716 prepare_read_keepalive_ack(con);
2717 break;
31b8006e 2718 case CEPH_MSGR_TAG_CLOSE:
8dacc7da
SW
2719 con_close_socket(con);
2720 con->state = CON_STATE_CLOSED;
98bdb0aa 2721 goto out;
31b8006e
SW
2722 default:
2723 goto bad_tag;
2724 }
2725 }
2726 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2727 ret = read_partial_message(con);
2728 if (ret <= 0) {
2729 switch (ret) {
2730 case -EBADMSG:
a51983e4 2731 con->error_msg = "bad crc/signature";
67c64eb7
ID
2732 /* fall through */
2733 case -EBADE:
31b8006e 2734 ret = -EIO;
98bdb0aa 2735 break;
31b8006e
SW
2736 case -EIO:
2737 con->error_msg = "io error";
98bdb0aa 2738 break;
31b8006e 2739 }
98bdb0aa 2740 goto out;
31b8006e
SW
2741 }
2742 if (con->in_tag == CEPH_MSGR_TAG_READY)
2743 goto more;
2744 process_message(con);
7b862e07
SW
2745 if (con->state == CON_STATE_OPEN)
2746 prepare_read_tag(con);
31b8006e
SW
2747 goto more;
2748 }
3a23083b
SW
2749 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2750 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2751 /*
2752 * the final handshake seq exchange is semantically
2753 * equivalent to an ACK
2754 */
31b8006e
SW
2755 ret = read_partial_ack(con);
2756 if (ret <= 0)
98bdb0aa 2757 goto out;
31b8006e
SW
2758 process_ack(con);
2759 goto more;
2760 }
8b9558aa
YZ
2761 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2762 ret = read_keepalive_ack(con);
2763 if (ret <= 0)
2764 goto out;
2765 goto more;
2766 }
31b8006e 2767
31b8006e 2768out:
98bdb0aa 2769 dout("try_read done on %p ret %d\n", con, ret);
31b8006e
SW
2770 return ret;
2771
2772bad_tag:
2773 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2774 con->error_msg = "protocol error, garbage tag";
2775 ret = -1;
2776 goto out;
2777}
2778
2779
/*
 * Atomically queue work on a connection after the specified delay.
 * Bump @con reference to avoid races with connection teardown.
 * Returns 0 if work was queued, or an error code otherwise.
 */
static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
31b8006e 2786{
31b8006e 2787 if (!con->ops->get(con)) {
802c6d96 2788 dout("%s %p ref count 0\n", __func__, con);
802c6d96 2789 return -ENOENT;
31b8006e
SW
2790 }
2791
802c6d96
AE
2792 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2793 dout("%s %p - already queued\n", __func__, con);
31b8006e 2794 con->ops->put(con);
802c6d96 2795 return -EBUSY;
31b8006e 2796 }
802c6d96
AE
2797
2798 dout("%s %p %lu\n", __func__, con, delay);
802c6d96
AE
2799 return 0;
2800}
2801
2802static void queue_con(struct ceph_connection *con)
2803{
2804 (void) queue_con_delay(con, 0);
31b8006e
SW
2805}
2806
37ab77ac
ID
2807static void cancel_con(struct ceph_connection *con)
2808{
2809 if (cancel_delayed_work(&con->work)) {
2810 dout("%s %p\n", __func__, con);
2811 con->ops->put(con);
2812 }
2813}
2814
7bb21d68
AE
2815static bool con_sock_closed(struct ceph_connection *con)
2816{
c9ffc77a 2817 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
7bb21d68
AE
2818 return false;
2819
2820#define CASE(x) \
2821 case CON_STATE_ ## x: \
2822 con->error_msg = "socket closed (con state " #x ")"; \
2823 break;
2824
2825 switch (con->state) {
2826 CASE(CLOSED);
2827 CASE(PREOPEN);
2828 CASE(CONNECTING);
2829 CASE(NEGOTIATING);
2830 CASE(OPEN);
2831 CASE(STANDBY);
2832 default:
b9a67899 2833 pr_warn("%s con %p unrecognized state %lu\n",
7bb21d68
AE
2834 __func__, con, con->state);
2835 con->error_msg = "unrecognized con state";
2836 BUG();
2837 break;
2838 }
2839#undef CASE
2840
2841 return true;
2842}
2843
f20a39fd
AE
2844static bool con_backoff(struct ceph_connection *con)
2845{
2846 int ret;
2847
2848 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2849 return false;
2850
2851 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2852 if (ret) {
2853 dout("%s: con %p FAILED to back off %lu\n", __func__,
2854 con, con->delay);
2855 BUG_ON(ret == -ENOENT);
2856 con_flag_set(con, CON_FLAG_BACKOFF);
2857 }
2858
2859 return true;
2860}
2861
93209264
AE
2862/* Finish fault handling; con->mutex must *not* be held here */
2863
2864static void con_fault_finish(struct ceph_connection *con)
2865{
f6330cc1
ID
2866 dout("%s %p\n", __func__, con);
2867
93209264
AE
2868 /*
2869 * in case we faulted due to authentication, invalidate our
2870 * current tickets so that we can get new ones.
2871 */
f6330cc1
ID
2872 if (con->auth_retry) {
2873 dout("auth_retry %d, invalidating\n", con->auth_retry);
2874 if (con->ops->invalidate_authorizer)
2875 con->ops->invalidate_authorizer(con);
2876 con->auth_retry = 0;
93209264
AE
2877 }
2878
2879 if (con->ops->fault)
2880 con->ops->fault(con);
2881}
2882
31b8006e
SW
2883/*
2884 * Do some work on a connection. Drop a connection ref when we're done.
2885 */
68931622 2886static void ceph_con_workfn(struct work_struct *work)
31b8006e
SW
2887{
2888 struct ceph_connection *con = container_of(work, struct ceph_connection,
2889 work.work);
49659416 2890 bool fault;
31b8006e 2891
9dd4658d 2892 mutex_lock(&con->mutex);
49659416
AE
2893 while (true) {
2894 int ret;
31b8006e 2895
49659416
AE
2896 if ((fault = con_sock_closed(con))) {
2897 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2898 break;
2899 }
2900 if (con_backoff(con)) {
2901 dout("%s: con %p BACKOFF\n", __func__, con);
2902 break;
2903 }
2904 if (con->state == CON_STATE_STANDBY) {
2905 dout("%s: con %p STANDBY\n", __func__, con);
2906 break;
2907 }
2908 if (con->state == CON_STATE_CLOSED) {
2909 dout("%s: con %p CLOSED\n", __func__, con);
2910 BUG_ON(con->sock);
2911 break;
2912 }
2913 if (con->state == CON_STATE_PREOPEN) {
2914 dout("%s: con %p PREOPEN\n", __func__, con);
2915 BUG_ON(con->sock);
2916 }
0da5d703 2917
49659416
AE
2918 ret = try_read(con);
2919 if (ret < 0) {
2920 if (ret == -EAGAIN)
2921 continue;
67c64eb7
ID
2922 if (!con->error_msg)
2923 con->error_msg = "socket error on read";
49659416
AE
2924 fault = true;
2925 break;
2926 }
2927
2928 ret = try_write(con);
2929 if (ret < 0) {
2930 if (ret == -EAGAIN)
2931 continue;
67c64eb7
ID
2932 if (!con->error_msg)
2933 con->error_msg = "socket error on write";
49659416
AE
2934 fault = true;
2935 }
2936
2937 break; /* If we make it to here, we're done */
3a140a0d 2938 }
b6e7b6a1
AE
2939 if (fault)
2940 con_fault(con);
9dd4658d 2941 mutex_unlock(&con->mutex);
0da5d703 2942
b6e7b6a1
AE
2943 if (fault)
2944 con_fault_finish(con);
2945
2946 con->ops->put(con);
31b8006e
SW
2947}
2948
31b8006e
SW
2949/*
2950 * Generic error/fault handler. A retry mechanism is used with
2951 * exponential backoff
2952 */
93209264 2953static void con_fault(struct ceph_connection *con)
31b8006e 2954{
31b8006e 2955 dout("fault %p state %lu to peer %s\n",
3d14c5d2 2956 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
31b8006e 2957
67c64eb7
ID
2958 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2959 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2960 con->error_msg = NULL;
2961
122070a2 2962 WARN_ON(con->state != CON_STATE_CONNECTING &&
8dacc7da
SW
2963 con->state != CON_STATE_NEGOTIATING &&
2964 con->state != CON_STATE_OPEN);
ec302645 2965
31b8006e 2966 con_close_socket(con);
5e095e8b 2967
c9ffc77a 2968 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
8dacc7da
SW
2969 dout("fault on LOSSYTX channel, marking CLOSED\n");
2970 con->state = CON_STATE_CLOSED;
93209264 2971 return;
3b5ede07
SW
2972 }
2973
5e095e8b 2974 if (con->in_msg) {
38941f80 2975 BUG_ON(con->in_msg->con != con);
5e095e8b
SW
2976 ceph_msg_put(con->in_msg);
2977 con->in_msg = NULL;
2978 }
31b8006e 2979
e80a52d1
SW
2980 /* Requeue anything that hasn't been acked */
2981 list_splice_init(&con->out_sent, &con->out_queue);
9bd2e6f8 2982
e76661d0
SW
2983 /* If there are no messages queued or keepalive pending, place
2984 * the connection in a STANDBY state */
2985 if (list_empty(&con->out_queue) &&
c9ffc77a 2986 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
e00de341 2987 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
c9ffc77a 2988 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
8dacc7da 2989 con->state = CON_STATE_STANDBY;
e80a52d1
SW
2990 } else {
2991 /* retry after a delay. */
8dacc7da 2992 con->state = CON_STATE_PREOPEN;
e80a52d1
SW
2993 if (con->delay == 0)
2994 con->delay = BASE_DELAY_INTERVAL;
2995 else if (con->delay < MAX_DELAY_INTERVAL)
2996 con->delay *= 2;
c9ffc77a 2997 con_flag_set(con, CON_FLAG_BACKOFF);
8618e30b 2998 queue_con(con);
31b8006e 2999 }
31b8006e
SW
3000}
3001
3002
3003
3004/*
15d9882c 3005 * initialize a new messenger instance
31b8006e 3006 */
15d9882c 3007void ceph_messenger_init(struct ceph_messenger *msgr,
859bff51 3008 struct ceph_entity_addr *myaddr)
31b8006e 3009{
31b8006e
SW
3010 spin_lock_init(&msgr->global_seq_lock);
3011
31b8006e
SW
3012 if (myaddr)
3013 msgr->inst.addr = *myaddr;
3014
3015 /* select a random nonce */
ac8839d7 3016 msgr->inst.addr.type = 0;
103e2d3a 3017 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
63f2d211 3018 encode_my_addr(msgr);
31b8006e 3019
a2a32584 3020 atomic_set(&msgr->stopping, 0);
757856d2 3021 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
31b8006e 3022
15d9882c 3023 dout("%s %p\n", __func__, msgr);
31b8006e 3024}
15d9882c 3025EXPORT_SYMBOL(ceph_messenger_init);
31b8006e 3026
757856d2
ID
3027void ceph_messenger_fini(struct ceph_messenger *msgr)
3028{
3029 put_net(read_pnet(&msgr->net));
3030}
3031EXPORT_SYMBOL(ceph_messenger_fini);
3032
583d0fef
ID
3033static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
3034{
3035 if (msg->con)
3036 msg->con->ops->put(msg->con);
3037
3038 msg->con = con ? con->ops->get(con) : NULL;
3039 BUG_ON(msg->con != con);
3040}
3041
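/*
 * A connection parked in STANDBY by con_fault() (nothing left to send)
 * is revived lazily: ceph_con_send() and ceph_con_keepalive() call
 * clear_standby() to flip it back to PREOPEN so the worker reconnects
 * the next time work is queued.
 */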
static void clear_standby(struct ceph_connection *con)
{
3044 /* come back from STANDBY? */
8dacc7da 3045 if (con->state == CON_STATE_STANDBY) {
e00de341 3046 dout("clear_standby %p and ++connect_seq\n", con);
8dacc7da 3047 con->state = CON_STATE_PREOPEN;
e00de341 3048 con->connect_seq++;
c9ffc77a
AE
3049 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
3050 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
e00de341
SW
3051 }
3052}
3053
31b8006e
SW
3054/*
3055 * Queue up an outgoing message on the given connection.
3056 */
3057void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3058{
31b8006e 3059 /* set src+dst */
dbad185d 3060 msg->hdr.src = con->msgr->inst.name;
3ca02ef9 3061 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
e84346b7
SW
3062 msg->needs_out_seq = true;
3063
ec302645 3064 mutex_lock(&con->mutex);
92ce034b 3065
8dacc7da 3066 if (con->state == CON_STATE_CLOSED) {
a59b55a6
SW
3067 dout("con_send %p closed, dropping %p\n", con, msg);
3068 ceph_msg_put(msg);
3069 mutex_unlock(&con->mutex);
3070 return;
3071 }
3072
583d0fef 3073 msg_con_set(msg, con);
92ce034b 3074
31b8006e
SW
3075 BUG_ON(!list_empty(&msg->list_head));
3076 list_add_tail(&msg->list_head, &con->out_queue);
3077 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3078 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3079 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3080 le32_to_cpu(msg->hdr.front_len),
3081 le32_to_cpu(msg->hdr.middle_len),
3082 le32_to_cpu(msg->hdr.data_len));
00650931
SW
3083
3084 clear_standby(con);
ec302645 3085 mutex_unlock(&con->mutex);
31b8006e
SW
3086
3087 /* if there wasn't anything waiting to send before, queue
3088 * new work */
c9ffc77a 3089 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
31b8006e
SW
3090 queue_con(con);
3091}
3d14c5d2 3092EXPORT_SYMBOL(ceph_con_send);
31b8006e
SW
3093
/*
 * Revoke a message that was previously queued for send
 */
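/*
 * If the message is already partially written, it cannot simply be
 * dropped: the unsent front/middle/data/footer bytes are converted
 * into con->out_skip below, and write_partial_skip() later pads them
 * with zeros so the framing already promised by the header stays
 * intact on the wire.
 */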
void ceph_msg_revoke(struct ceph_msg *msg)
{
6740a845
AE
3099 struct ceph_connection *con = msg->con;
3100
583d0fef
ID
3101 if (!con) {
3102 dout("%s msg %p null con\n", __func__, msg);
6740a845 3103 return; /* Message not in our possession */
583d0fef 3104 }
6740a845 3105
ec302645 3106 mutex_lock(&con->mutex);
31b8006e 3107 if (!list_empty(&msg->list_head)) {
38941f80 3108 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
31b8006e 3109 list_del_init(&msg->list_head);
31b8006e 3110 msg->hdr.seq = 0;
38941f80 3111
31b8006e 3112 ceph_msg_put(msg);
ed98adad
SW
3113 }
3114 if (con->out_msg == msg) {
67645d76
ID
3115 BUG_ON(con->out_skip);
3116 /* footer */
3117 if (con->out_msg_done) {
3118 con->out_skip += con_out_kvec_skip(con);
3119 } else {
3120 BUG_ON(!msg->data_length);
89f08173 3121 con->out_skip += sizeof_footer(con);
31b8006e 3122 }
67645d76
ID
3123 /* data, middle, front */
3124 if (msg->data_length)
3125 con->out_skip += msg->cursor.total_resid;
3126 if (msg->middle)
3127 con->out_skip += con_out_kvec_skip(con);
3128 con->out_skip += con_out_kvec_skip(con);
3129
3130 dout("%s %p msg %p - was sending, will write %d skip %d\n",
3131 __func__, con, msg, con->out_kvec_bytes, con->out_skip);
ed98adad 3132 msg->hdr.seq = 0;
67645d76 3133 con->out_msg = NULL;
92ce034b 3134 ceph_msg_put(msg);
31b8006e 3135 }
67645d76 3136
ec302645 3137 mutex_unlock(&con->mutex);
31b8006e
SW
3138}
3139
350b1c32 3140/*
0d59ab81 3141 * Revoke a message that we may be reading data into
350b1c32 3142 */
8921d114 3143void ceph_msg_revoke_incoming(struct ceph_msg *msg)
350b1c32 3144{
583d0fef 3145 struct ceph_connection *con = msg->con;
8921d114 3146
583d0fef 3147 if (!con) {
8921d114 3148 dout("%s msg %p null con\n", __func__, msg);
8921d114
AE
3149 return; /* Message not in our possession */
3150 }
3151
350b1c32 3152 mutex_lock(&con->mutex);
8921d114 3153 if (con->in_msg == msg) {
95c96174
ED
3154 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3155 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3156 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
350b1c32
SW
3157
3158 /* skip rest of message */
8921d114
AE
3159 dout("%s %p msg %p revoked\n", __func__, con, msg);
3160 con->in_base_pos = con->in_base_pos -
350b1c32 3161 sizeof(struct ceph_msg_header) -
0d59ab81
YS
3162 front_len -
3163 middle_len -
3164 data_len -
350b1c32 3165 sizeof(struct ceph_msg_footer);
350b1c32
SW
3166 ceph_msg_put(con->in_msg);
3167 con->in_msg = NULL;
3168 con->in_tag = CEPH_MSGR_TAG_READY;
684be25c 3169 con->in_seq++;
350b1c32 3170 } else {
8921d114
AE
3171 dout("%s %p in_msg %p msg %p no-op\n",
3172 __func__, con, con->in_msg, msg);
350b1c32
SW
3173 }
3174 mutex_unlock(&con->mutex);
3175}
3176
31b8006e
SW
3177/*
3178 * Queue a keepalive byte to ensure the tcp connection is alive.
3179 */
3180void ceph_con_keepalive(struct ceph_connection *con)
3181{
e00de341 3182 dout("con_keepalive %p\n", con);
00650931 3183 mutex_lock(&con->mutex);
e00de341 3184 clear_standby(con);
00650931 3185 mutex_unlock(&con->mutex);
c9ffc77a
AE
3186 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
3187 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
31b8006e
SW
3188 queue_con(con);
3189}
3d14c5d2 3190EXPORT_SYMBOL(ceph_con_keepalive);
31b8006e 3191
8b9558aa
YZ
3192bool ceph_con_keepalive_expired(struct ceph_connection *con,
3193 unsigned long interval)
3194{
3195 if (interval > 0 &&
3196 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
1134e091 3197 struct timespec now;
8b9558aa 3198 struct timespec ts;
1134e091 3199 ktime_get_real_ts(&now);
8b9558aa
YZ
3200 jiffies_to_timespec(interval, &ts);
3201 ts = timespec_add(con->last_keepalive_ack, ts);
3202 return timespec_compare(&now, &ts) >= 0;
3203 }
3204 return false;
3205}
3206
6644ed7b 3207static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
43794509 3208{
6644ed7b
AE
3209 struct ceph_msg_data *data;
3210
3211 if (WARN_ON(!ceph_msg_data_type_valid(type)))
3212 return NULL;
3213
81b36be4 3214 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
7c40b22f
DC
3215 if (!data)
3216 return NULL;
3217
3218 data->type = type;
5240d9f9 3219 INIT_LIST_HEAD(&data->links);
6644ed7b
AE
3220
3221 return data;
3222}
3223
3224static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3225{
3226 if (!data)
3227 return;
3228
5240d9f9 3229 WARN_ON(!list_empty(&data->links));
e4339d28 3230 if (data->type == CEPH_MSG_DATA_PAGELIST)
6644ed7b 3231 ceph_pagelist_release(data->pagelist);
81b36be4 3232 kmem_cache_free(ceph_msg_data_cache, data);
43794509
AE
3233}
3234
90af3602 3235void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
f1baeb2b 3236 size_t length, size_t alignment)
02afca6c 3237{
6644ed7b
AE
3238 struct ceph_msg_data *data;
3239
07aa1558
AE
3240 BUG_ON(!pages);
3241 BUG_ON(!length);
6644ed7b
AE
3242
3243 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
3244 BUG_ON(!data);
3245 data->pages = pages;
3246 data->length = length;
3247 data->alignment = alignment & ~PAGE_MASK;
02afca6c 3248
5240d9f9
AE
3249 list_add_tail(&data->links, &msg->data);
3250 msg->data_length += length;
02afca6c 3251}
90af3602 3252EXPORT_SYMBOL(ceph_msg_data_add_pages);
31b8006e 3253
90af3602 3254void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
27fa8385
AE
3255 struct ceph_pagelist *pagelist)
3256{
6644ed7b
AE
3257 struct ceph_msg_data *data;
3258
07aa1558
AE
3259 BUG_ON(!pagelist);
3260 BUG_ON(!pagelist->length);
27fa8385 3261
6644ed7b
AE
3262 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
3263 BUG_ON(!data);
3264 data->pagelist = pagelist;
3265
5240d9f9
AE
3266 list_add_tail(&data->links, &msg->data);
3267 msg->data_length += pagelist->length;
27fa8385 3268}
90af3602 3269EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
27fa8385 3270
ea96571f 3271#ifdef CONFIG_BLOCK
90af3602 3272void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
a1930804 3273 size_t length)
27fa8385 3274{
6644ed7b
AE
3275 struct ceph_msg_data *data;
3276
07aa1558 3277 BUG_ON(!bio);
27fa8385 3278
6644ed7b
AE
3279 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
3280 BUG_ON(!data);
3281 data->bio = bio;
c851c495 3282 data->bio_length = length;
6644ed7b 3283
5240d9f9
AE
3284 list_add_tail(&data->links, &msg->data);
3285 msg->data_length += length;
27fa8385 3286}
90af3602 3287EXPORT_SYMBOL(ceph_msg_data_add_bio);
ea96571f 3288#endif /* CONFIG_BLOCK */
27fa8385 3289
31b8006e
SW
3290/*
3291 * construct a new message with given type, size
3292 * the new msg has a ref count of 1.
3293 */
b61c2763
SW
3294struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3295 bool can_fail)
31b8006e
SW
3296{
3297 struct ceph_msg *m;
3298
e3d5d638 3299 m = kmem_cache_zalloc(ceph_msg_cache, flags);
31b8006e
SW
3300 if (m == NULL)
3301 goto out;
31b8006e
SW
3302
3303 m->hdr.type = cpu_to_le16(type);
45c6ceb5 3304 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
31b8006e 3305 m->hdr.front_len = cpu_to_le32(front_len);
ca20892d 3306
9516e45b
AE
3307 INIT_LIST_HEAD(&m->list_head);
3308 kref_init(&m->kref);
5240d9f9 3309 INIT_LIST_HEAD(&m->data);
ca20892d 3310
31b8006e
SW
3311 /* front */
3312 if (front_len) {
eeb0bed5 3313 m->front.iov_base = ceph_kvmalloc(front_len, flags);
31b8006e 3314 if (m->front.iov_base == NULL) {
b61c2763 3315 dout("ceph_msg_new can't allocate %d bytes\n",
31b8006e
SW
3316 front_len);
3317 goto out2;
3318 }
3319 } else {
3320 m->front.iov_base = NULL;
3321 }
f2be82b0 3322 m->front_alloc_len = m->front.iov_len = front_len;
31b8006e 3323
bb257664 3324 dout("ceph_msg_new %p front %d\n", m, front_len);
31b8006e
SW
3325 return m;
3326
3327out2:
3328 ceph_msg_put(m);
3329out:
b61c2763
SW
3330 if (!can_fail) {
3331 pr_err("msg_new can't create type %d front %d\n", type,
3332 front_len);
f0ed1b7c 3333 WARN_ON(1);
b61c2763
SW
3334 } else {
3335 dout("msg_new can't create type %d front %d\n", type,
3336 front_len);
3337 }
a79832f2 3338 return NULL;
31b8006e 3339}
3d14c5d2 3340EXPORT_SYMBOL(ceph_msg_new);
31b8006e 3341
31b8006e
SW
3342/*
3343 * Allocate "middle" portion of a message, if it is needed and wasn't
3344 * allocated by alloc_msg. This allows us to read a small fixed-size
3345 * per-type header in the front and then gracefully fail (i.e.,
3346 * propagate the error to the caller based on info in the front) when
3347 * the middle is too large.
3348 */
2450418c 3349static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
31b8006e
SW
3350{
3351 int type = le16_to_cpu(msg->hdr.type);
3352 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3353
3354 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3355 ceph_msg_type_name(type), middle_len);
3356 BUG_ON(!middle_len);
3357 BUG_ON(msg->middle);
3358
b6c1d5b8 3359 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
31b8006e
SW
3360 if (!msg->middle)
3361 return -ENOMEM;
3362 return 0;
3363}
3364
/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns 0 on success, or a negative error code.
 *
 * On success, if we set *skip = 1:
 *  - the next message should be skipped and ignored.
 *  - con->in_msg == NULL
 * or if we set *skip = 0:
 *  - con->in_msg is non-null.
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
4740a623 3380static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
2450418c 3381{
4740a623 3382 struct ceph_msg_header *hdr = &con->in_hdr;
2450418c 3383 int middle_len = le32_to_cpu(hdr->middle_len);
1d866d1c 3384 struct ceph_msg *msg;
4740a623 3385 int ret = 0;
2450418c 3386
1c20f2d2 3387 BUG_ON(con->in_msg != NULL);
53ded495 3388 BUG_ON(!con->ops->alloc_msg);
2450418c 3389
53ded495
AE
3390 mutex_unlock(&con->mutex);
3391 msg = con->ops->alloc_msg(con, hdr, skip);
3392 mutex_lock(&con->mutex);
3393 if (con->state != CON_STATE_OPEN) {
3394 if (msg)
1d866d1c 3395 ceph_msg_put(msg);
53ded495
AE
3396 return -EAGAIN;
3397 }
4137577a
AE
3398 if (msg) {
3399 BUG_ON(*skip);
583d0fef 3400 msg_con_set(msg, con);
4137577a 3401 con->in_msg = msg;
4137577a
AE
3402 } else {
3403 /*
3404 * Null message pointer means either we should skip
3405 * this message or we couldn't allocate memory. The
3406 * former is not an error.
3407 */
3408 if (*skip)
3409 return 0;
4137577a 3410
67c64eb7 3411 con->error_msg = "error allocating memory for incoming message";
53ded495 3412 return -ENOMEM;
2450418c 3413 }
1c20f2d2 3414 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2450418c 3415
1c20f2d2
AE
3416 if (middle_len && !con->in_msg->middle) {
3417 ret = ceph_alloc_middle(con, con->in_msg);
2450418c 3418 if (ret < 0) {
1c20f2d2
AE
3419 ceph_msg_put(con->in_msg);
3420 con->in_msg = NULL;
2450418c
YS
3421 }
3422 }
9d7f0f13 3423
4740a623 3424 return ret;
2450418c
YS
3425}
3426
31b8006e
SW
3427
3428/*
3429 * Free a generically kmalloc'd message.
3430 */
0215e44b 3431static void ceph_msg_free(struct ceph_msg *m)
31b8006e 3432{
0215e44b 3433 dout("%s %p\n", __func__, m);
4965fc38 3434 kvfree(m->front.iov_base);
e3d5d638 3435 kmem_cache_free(ceph_msg_cache, m);
31b8006e
SW
3436}
3437
0215e44b 3438static void ceph_msg_release(struct kref *kref)
c2e552e7
SW
3439{
3440 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
10bcee14 3441 struct ceph_msg_data *data, *next;
31b8006e 3442
0215e44b 3443 dout("%s %p\n", __func__, m);
c2e552e7
SW
3444 WARN_ON(!list_empty(&m->list_head));
3445
583d0fef
ID
3446 msg_con_set(m, NULL);
3447
c2e552e7
SW
3448 /* drop middle, data, if any */
3449 if (m->middle) {
3450 ceph_buffer_put(m->middle);
3451 m->middle = NULL;
31b8006e 3452 }
5240d9f9 3453
10bcee14
GT
3454 list_for_each_entry_safe(data, next, &m->data, links) {
3455 list_del_init(&data->links);
5240d9f9
AE
3456 ceph_msg_data_destroy(data);
3457 }
a1930804 3458 m->data_length = 0;
58bb3b37 3459
c2e552e7
SW
3460 if (m->pool)
3461 ceph_msgpool_put(m->pool, m);
3462 else
0215e44b
ID
3463 ceph_msg_free(m);
3464}
3465
3466struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3467{
3468 dout("%s %p (was %d)\n", __func__, msg,
2c935bc5 3469 kref_read(&msg->kref));
0215e44b
ID
3470 kref_get(&msg->kref);
3471 return msg;
3472}
3473EXPORT_SYMBOL(ceph_msg_get);
3474
3475void ceph_msg_put(struct ceph_msg *msg)
3476{
3477 dout("%s %p (was %d)\n", __func__, msg,
2c935bc5 3478 kref_read(&msg->kref));
0215e44b 3479 kref_put(&msg->kref, ceph_msg_release);
31b8006e 3480}
0215e44b 3481EXPORT_SYMBOL(ceph_msg_put);
9ec7cab1
SW
3482
3483void ceph_msg_dump(struct ceph_msg *msg)
3484{
3cea4c30
ID
3485 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3486 msg->front_alloc_len, msg->data_length);
9ec7cab1
SW
3487 print_hex_dump(KERN_DEBUG, "header: ",
3488 DUMP_PREFIX_OFFSET, 16, 1,
3489 &msg->hdr, sizeof(msg->hdr), true);
3490 print_hex_dump(KERN_DEBUG, " front: ",
3491 DUMP_PREFIX_OFFSET, 16, 1,
3492 msg->front.iov_base, msg->front.iov_len, true);
3493 if (msg->middle)
3494 print_hex_dump(KERN_DEBUG, "middle: ",
3495 DUMP_PREFIX_OFFSET, 16, 1,
3496 msg->middle->vec.iov_base,
3497 msg->middle->vec.iov_len, true);
3498 print_hex_dump(KERN_DEBUG, "footer: ",
3499 DUMP_PREFIX_OFFSET, 16, 1,
3500 &msg->footer, sizeof(msg->footer), true);
3501}
3d14c5d2 3502EXPORT_SYMBOL(ceph_msg_dump);