1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/net/sunrpc/xprtsock.c
4 *
5 * Client-side transport implementation for sockets.
6 *
7 * TCP callback races fixes (C) 1998 Red Hat
8 * TCP send fixes (C) 1998 Red Hat
9 * TCP NFS related read + write fixes
10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11 *
12 * Rewrite of large parts of the code in order to stabilize TCP stuff.
13 * Fix behaviour when socket buffer is full.
14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
15 *
16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
17 *
18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
19 * <gilles.quillard@bull.net>
20 */
21
22 #include <linux/types.h>
23 #include <linux/string.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/capability.h>
27 #include <linux/pagemap.h>
28 #include <linux/errno.h>
29 #include <linux/socket.h>
30 #include <linux/in.h>
31 #include <linux/net.h>
32 #include <linux/mm.h>
33 #include <linux/un.h>
34 #include <linux/udp.h>
35 #include <linux/tcp.h>
36 #include <linux/sunrpc/clnt.h>
37 #include <linux/sunrpc/addr.h>
38 #include <linux/sunrpc/sched.h>
39 #include <linux/sunrpc/svcsock.h>
40 #include <linux/sunrpc/xprtsock.h>
41 #include <linux/file.h>
42 #ifdef CONFIG_SUNRPC_BACKCHANNEL
43 #include <linux/sunrpc/bc_xprt.h>
44 #endif
45
46 #include <net/sock.h>
47 #include <net/checksum.h>
48 #include <net/udp.h>
49 #include <net/tcp.h>
50 #include <linux/bvec.h>
51 #include <linux/highmem.h>
52 #include <linux/uio.h>
53 #include <linux/sched/mm.h>
54
55 #include <trace/events/sunrpc.h>
56
57 #include "socklib.h"
58 #include "sunrpc.h"
59
60 static void xs_close(struct rpc_xprt *xprt);
61 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
62 struct socket *sock);
63
64 /*
65 * xprtsock tunables
66 */
67 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
68 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
69 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
70
71 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
72 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
73
74 #define XS_TCP_LINGER_TO (15U * HZ)
75 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
76
77 /*
78 * We can register our own files under /proc/sys/sunrpc by
79 * calling register_sysctl_table() again. The files in that
80 * directory become the union of all files registered there.
81 *
82 * We simply need to make sure that we don't collide with
83 * someone else's file names!
84 */
85
86 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
87 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
88 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
89 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
90 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
91
92 static struct ctl_table_header *sunrpc_table_header;
93
94 static struct xprt_class xs_local_transport;
95 static struct xprt_class xs_udp_transport;
96 static struct xprt_class xs_tcp_transport;
97 static struct xprt_class xs_bc_tcp_transport;
98
99 /*
100 * FIXME: changing the UDP slot table size should also resize the UDP
101 * socket buffers for existing UDP transports
102 */
103 static struct ctl_table xs_tunables_table[] = {
104 {
105 .procname = "udp_slot_table_entries",
106 .data = &xprt_udp_slot_table_entries,
107 .maxlen = sizeof(unsigned int),
108 .mode = 0644,
109 .proc_handler = proc_dointvec_minmax,
110 .extra1 = &min_slot_table_size,
111 .extra2 = &max_slot_table_size
112 },
113 {
114 .procname = "tcp_slot_table_entries",
115 .data = &xprt_tcp_slot_table_entries,
116 .maxlen = sizeof(unsigned int),
117 .mode = 0644,
118 .proc_handler = proc_dointvec_minmax,
119 .extra1 = &min_slot_table_size,
120 .extra2 = &max_slot_table_size
121 },
122 {
123 .procname = "tcp_max_slot_table_entries",
124 .data = &xprt_max_tcp_slot_table_entries,
125 .maxlen = sizeof(unsigned int),
126 .mode = 0644,
127 .proc_handler = proc_dointvec_minmax,
128 .extra1 = &min_slot_table_size,
129 .extra2 = &max_tcp_slot_table_limit
130 },
131 {
132 .procname = "min_resvport",
133 .data = &xprt_min_resvport,
134 .maxlen = sizeof(unsigned int),
135 .mode = 0644,
136 .proc_handler = proc_dointvec_minmax,
137 .extra1 = &xprt_min_resvport_limit,
138 .extra2 = &xprt_max_resvport_limit
139 },
140 {
141 .procname = "max_resvport",
142 .data = &xprt_max_resvport,
143 .maxlen = sizeof(unsigned int),
144 .mode = 0644,
145 .proc_handler = proc_dointvec_minmax,
146 .extra1 = &xprt_min_resvport_limit,
147 .extra2 = &xprt_max_resvport_limit
148 },
149 {
150 .procname = "tcp_fin_timeout",
151 .data = &xs_tcp_fin_timeout,
152 .maxlen = sizeof(xs_tcp_fin_timeout),
153 .mode = 0644,
154 .proc_handler = proc_dointvec_jiffies,
155 },
156 { },
157 };
158
159 static struct ctl_table sunrpc_table[] = {
160 {
161 .procname = "sunrpc",
162 .mode = 0555,
163 .child = xs_tunables_table
164 },
165 { },
166 };
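/*
 * Illustrative note (added commentary, not original source): once
 * sunrpc_table is registered, the tunables above show up as plain
 * files under /proc/sys/sunrpc. A minimal userspace sketch, assuming
 * the module is loaded and the table registered, that reads one of
 * them:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int slots;
 *		FILE *f = fopen("/proc/sys/sunrpc/tcp_slot_table_entries", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%u", &slots) == 1)
 *			printf("tcp_slot_table_entries = %u\n", slots);
 *		fclose(f);
 *		return 0;
 *	}
 *
 * Writes (e.g. "echo 16 > /proc/sys/sunrpc/tcp_slot_table_entries")
 * pass through proc_dointvec_minmax(), which rejects anything outside
 * the extra1/extra2 bounds declared in the table.
 */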
167
168 /*
169 * Wait duration for a reply from the RPC portmapper.
170 */
171 #define XS_BIND_TO (60U * HZ)
172
173 /*
174 * Delay if a UDP socket connect error occurs. This is most likely some
175 * kind of resource problem on the local host.
176 */
177 #define XS_UDP_REEST_TO (2U * HZ)
178
179 /*
180 * The reestablish timeout allows clients to delay for a bit before attempting
181 * to reconnect to a server that just dropped our connection.
182 *
183 * We implement an exponential backoff when trying to reestablish a TCP
184 * transport connection with the server. Some servers like to drop a TCP
185 * connection when they are overworked, so we start with a short timeout and
186 * increase over time if the server is down or not responding.
187 */
188 #define XS_TCP_INIT_REEST_TO (3U * HZ)
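/*
 * Added sketch (not original code): the backoff described above
 * roughly doubles the reconnect delay after each failed attempt and
 * clamps it at the transport's maximum. With a hypothetical bound
 * max_reest_to, the progression looks like:
 *
 *	unsigned long to = XS_TCP_INIT_REEST_TO;	3s
 *
 *	to <<= 1;					6s
 *	to <<= 1;					12s
 *	if (to > max_reest_to)
 *		to = max_reest_to;			clamp
 *
 * so an overloaded server sees closely spaced retries at first, and
 * progressively longer waits if it stays unreachable.
 */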
189
190 /*
191 * TCP idle timeout; client drops the transport socket if it is idle
192 * for this long. Note that we also timeout UDP sockets to prevent
193 * holding port numbers when there is no RPC traffic.
194 */
195 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
196
197 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
198 # undef RPC_DEBUG_DATA
199 # define RPCDBG_FACILITY RPCDBG_TRANS
200 #endif
201
202 #ifdef RPC_DEBUG_DATA
203 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
204 {
205 u8 *buf = (u8 *) packet;
206 int j;
207
208 dprintk("RPC: %s\n", msg);
209 for (j = 0; j < count && j < 128; j += 4) {
210 if (!(j & 31)) {
211 if (j)
212 dprintk("\n");
213 dprintk("0x%04x ", j);
214 }
215 dprintk("%02x%02x%02x%02x ",
216 buf[j], buf[j+1], buf[j+2], buf[j+3]);
217 }
218 dprintk("\n");
219 }
220 #else
221 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
222 {
223 /* NOP */
224 }
225 #endif
226
227 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
228 {
229 return (struct rpc_xprt *) sk->sk_user_data;
230 }
231
232 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
233 {
234 return (struct sockaddr *) &xprt->addr;
235 }
236
237 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
238 {
239 return (struct sockaddr_un *) &xprt->addr;
240 }
241
242 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
243 {
244 return (struct sockaddr_in *) &xprt->addr;
245 }
246
247 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
248 {
249 return (struct sockaddr_in6 *) &xprt->addr;
250 }
251
252 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
253 {
254 struct sockaddr *sap = xs_addr(xprt);
255 struct sockaddr_in6 *sin6;
256 struct sockaddr_in *sin;
257 struct sockaddr_un *sun;
258 char buf[128];
259
260 switch (sap->sa_family) {
261 case AF_LOCAL:
262 sun = xs_addr_un(xprt);
263 strlcpy(buf, sun->sun_path, sizeof(buf));
264 xprt->address_strings[RPC_DISPLAY_ADDR] =
265 kstrdup(buf, GFP_KERNEL);
266 break;
267 case AF_INET:
268 (void)rpc_ntop(sap, buf, sizeof(buf));
269 xprt->address_strings[RPC_DISPLAY_ADDR] =
270 kstrdup(buf, GFP_KERNEL);
271 sin = xs_addr_in(xprt);
272 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
273 break;
274 case AF_INET6:
275 (void)rpc_ntop(sap, buf, sizeof(buf));
276 xprt->address_strings[RPC_DISPLAY_ADDR] =
277 kstrdup(buf, GFP_KERNEL);
278 sin6 = xs_addr_in6(xprt);
279 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
280 break;
281 default:
282 BUG();
283 }
284
285 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
286 }
287
288 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
289 {
290 struct sockaddr *sap = xs_addr(xprt);
291 char buf[128];
292
293 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
294 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
295
296 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
297 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
298 }
299
300 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
301 const char *protocol,
302 const char *netid)
303 {
304 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
305 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
306 xs_format_common_peer_addresses(xprt);
307 xs_format_common_peer_ports(xprt);
308 }
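/*
 * Worked example (added; values hypothetical): for a TCP transport
 * to 192.0.2.1 port 2049, the helpers above leave behind:
 *
 *	RPC_DISPLAY_ADDR:     "192.0.2.1"
 *	RPC_DISPLAY_HEX_ADDR: "c0000201"  ("%08x" of ntohl(s_addr))
 *	RPC_DISPLAY_PORT:     "2049"
 *	RPC_DISPLAY_HEX_PORT: " 801"      ("%4hx" of 2049 == 0x801)
 *	RPC_DISPLAY_PROTO:    "tcp"
 *	RPC_DISPLAY_NETID:    "tcp"
 */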
309
310 static void xs_update_peer_port(struct rpc_xprt *xprt)
311 {
312 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
313 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
314
315 xs_format_common_peer_ports(xprt);
316 }
317
318 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
319 {
320 unsigned int i;
321
322 for (i = 0; i < RPC_DISPLAY_MAX; i++)
323 switch (i) {
324 case RPC_DISPLAY_PROTO:
325 case RPC_DISPLAY_NETID:
326 continue;
327 default:
328 kfree(xprt->address_strings[i]);
329 }
330 }
331
332 static size_t
333 xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
334 {
335 size_t i,n;
336
337 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
338 return want;
339 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
340 for (i = 0; i < n; i++) {
341 if (buf->pages[i])
342 continue;
343 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
344 if (!buf->pages[i]) {
345 i *= PAGE_SIZE;
346 return i > buf->page_base ? i - buf->page_base : 0;
347 }
348 }
349 return want;
350 }
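/*
 * Added note: XDRBUF_SPARSE_PAGES marks a receive buffer whose page
 * slots were deliberately left empty by the upper layer; the pages
 * are allocated here on demand, just before the socket read that
 * will fill them. On allocation failure the function returns how
 * many bytes are still backed by real pages, so the caller can do a
 * short read instead of failing outright.
 */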
351
352 static ssize_t
353 xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
354 {
355 ssize_t ret;
356 if (seek != 0)
357 iov_iter_advance(&msg->msg_iter, seek);
358 ret = sock_recvmsg(sock, msg, flags);
359 return ret > 0 ? ret + seek : ret;
360 }
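/*
 * Added note: "seek" lets a caller resume a partially filled buffer.
 * The iterator is advanced past the bytes received earlier, and the
 * seek offset is folded back into the return value so callers keep
 * accounting in absolute buffer positions: with seek == 100 and a
 * 50-byte read (hypothetical numbers), the function returns 150 even
 * though only 50 new bytes arrived.
 */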
361
362 static ssize_t
363 xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
364 struct kvec *kvec, size_t count, size_t seek)
365 {
366 iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
367 return xs_sock_recvmsg(sock, msg, flags, seek);
368 }
369
370 static ssize_t
371 xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
372 struct bio_vec *bvec, unsigned long nr, size_t count,
373 size_t seek)
374 {
375 iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
376 return xs_sock_recvmsg(sock, msg, flags, seek);
377 }
378
379 static ssize_t
380 xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
381 size_t count)
382 {
383 iov_iter_discard(&msg->msg_iter, READ, count);
384 return sock_recvmsg(sock, msg, flags);
385 }
386
387 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
388 static void
389 xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
390 {
391 struct bvec_iter bi = {
392 .bi_size = count,
393 };
394 struct bio_vec bv;
395
396 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
397 for_each_bvec(bv, bvec, bi, bi)
398 flush_dcache_page(bv.bv_page);
399 }
400 #else
401 static inline void
402 xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
403 {
404 }
405 #endif
406
407 static ssize_t
408 xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
409 struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
410 {
411 size_t want, seek_init = seek, offset = 0;
412 ssize_t ret;
413
414 want = min_t(size_t, count, buf->head[0].iov_len);
415 if (seek < want) {
416 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
417 if (ret <= 0)
418 goto sock_err;
419 offset += ret;
420 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
421 goto out;
422 if (ret != want)
423 goto out;
424 seek = 0;
425 } else {
426 seek -= want;
427 offset += want;
428 }
429
430 want = xs_alloc_sparse_pages(buf,
431 min_t(size_t, count - offset, buf->page_len),
432 GFP_KERNEL);
433 if (seek < want) {
434 ret = xs_read_bvec(sock, msg, flags, buf->bvec,
435 xdr_buf_pagecount(buf),
436 want + buf->page_base,
437 seek + buf->page_base);
438 if (ret <= 0)
439 goto sock_err;
440 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
441 ret -= buf->page_base;
442 offset += ret;
443 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
444 goto out;
445 if (ret != want)
446 goto out;
447 seek = 0;
448 } else {
449 seek -= want;
450 offset += want;
451 }
452
453 want = min_t(size_t, count - offset, buf->tail[0].iov_len);
454 if (seek < want) {
455 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
456 if (ret <= 0)
457 goto sock_err;
458 offset += ret;
459 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
460 goto out;
461 if (ret != want)
462 goto out;
463 } else if (offset < seek_init)
464 offset = seek_init;
465 ret = -EMSGSIZE;
466 out:
467 *read = offset - seek_init;
468 return ret;
469 sock_err:
470 offset += seek;
471 goto out;
472 }
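/*
 * Added commentary: the xdr_buf is consumed in its three parts, in
 * order:
 *
 *	+-----------+---------------------+-----------+
 *	| head kvec | page array          | tail kvec |
 *	| (RPC/XDR  | (bulk payload,      | (padding, |
 *	|  header)  |  buf->page_len)     |  trailer) |
 *	+-----------+---------------------+-----------+
 *
 * "seek" records how much of that flattened layout was received on a
 * previous pass, so each section is skipped entirely (seek >= want),
 * resumed mid-way, or read from its start. Falling through to
 * -EMSGSIZE means the socket still held data after the whole buffer
 * was filled; the caller treats that as truncation.
 */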
473
474 static void
475 xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
476 {
477 if (!transport->recv.copied) {
478 if (buf->head[0].iov_len >= transport->recv.offset)
479 memcpy(buf->head[0].iov_base,
480 &transport->recv.xid,
481 transport->recv.offset);
482 transport->recv.copied = transport->recv.offset;
483 }
484 }
485
486 static bool
487 xs_read_stream_request_done(struct sock_xprt *transport)
488 {
489 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
490 }
491
492 static void
493 xs_read_stream_check_eor(struct sock_xprt *transport,
494 struct msghdr *msg)
495 {
496 if (xs_read_stream_request_done(transport))
497 msg->msg_flags |= MSG_EOR;
498 }
499
500 static ssize_t
501 xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
502 int flags, struct rpc_rqst *req)
503 {
504 struct xdr_buf *buf = &req->rq_private_buf;
505 size_t want, read;
506 ssize_t ret;
507
508 xs_read_header(transport, buf);
509
510 want = transport->recv.len - transport->recv.offset;
511 if (want != 0) {
512 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
513 transport->recv.copied + want,
514 transport->recv.copied,
515 &read);
516 transport->recv.offset += read;
517 transport->recv.copied += read;
518 }
519
520 if (transport->recv.offset == transport->recv.len)
521 xs_read_stream_check_eor(transport, msg);
522
523 if (want == 0)
524 return 0;
525
526 switch (ret) {
527 default:
528 break;
529 case -EFAULT:
530 case -EMSGSIZE:
531 msg->msg_flags |= MSG_TRUNC;
532 return read;
533 case 0:
534 return -ESHUTDOWN;
535 }
536 return ret < 0 ? ret : read;
537 }
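/*
 * Added note: on the first fragment of a record the read below pulls
 * in three 32-bit words at once - the fragment header, the XID and
 * the call direction, which the receive bookkeeping stores
 * back-to-back - hence 3 * sizeof(__be32). Continuation fragments of
 * an already-identified request only need the 4-byte fragment header
 * itself.
 */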
538
539 static size_t
540 xs_read_stream_headersize(bool isfrag)
541 {
542 if (isfrag)
543 return sizeof(__be32);
544 return 3 * sizeof(__be32);
545 }
546
547 static ssize_t
548 xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
549 int flags, size_t want, size_t seek)
550 {
551 struct kvec kvec = {
552 .iov_base = &transport->recv.fraghdr,
553 .iov_len = want,
554 };
555 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
556 }
557
558 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
559 static ssize_t
560 xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
561 {
562 struct rpc_xprt *xprt = &transport->xprt;
563 struct rpc_rqst *req;
564 ssize_t ret;
565
566 /* Is this transport associated with the backchannel? */
567 if (!xprt->bc_serv)
568 return -ESHUTDOWN;
569
570 /* Look up and lock the request corresponding to the given XID */
571 req = xprt_lookup_bc_request(xprt, transport->recv.xid);
572 if (!req) {
573 printk(KERN_WARNING "Callback slot table overflowed\n");
574 return -ESHUTDOWN;
575 }
576 if (transport->recv.copied && !req->rq_private_buf.len)
577 return -ESHUTDOWN;
578
579 ret = xs_read_stream_request(transport, msg, flags, req);
580 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
581 xprt_complete_bc_request(req, transport->recv.copied);
582 else
583 req->rq_private_buf.len = transport->recv.copied;
584
585 return ret;
586 }
587 #else /* CONFIG_SUNRPC_BACKCHANNEL */
588 static ssize_t
589 xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
590 {
591 return -ESHUTDOWN;
592 }
593 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
594
595 static ssize_t
596 xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
597 {
598 struct rpc_xprt *xprt = &transport->xprt;
599 struct rpc_rqst *req;
600 ssize_t ret = 0;
601
602 /* Look up and lock the request corresponding to the given XID */
603 spin_lock(&xprt->queue_lock);
604 req = xprt_lookup_rqst(xprt, transport->recv.xid);
605 if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
606 msg->msg_flags |= MSG_TRUNC;
607 goto out;
608 }
609 xprt_pin_rqst(req);
610 spin_unlock(&xprt->queue_lock);
611
612 ret = xs_read_stream_request(transport, msg, flags, req);
613
614 spin_lock(&xprt->queue_lock);
615 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
616 xprt_complete_rqst(req->rq_task, transport->recv.copied);
617 else
618 req->rq_private_buf.len = transport->recv.copied;
619 xprt_unpin_rqst(req);
620 out:
621 spin_unlock(&xprt->queue_lock);
622 return ret;
623 }
624
625 static ssize_t
626 xs_read_stream(struct sock_xprt *transport, int flags)
627 {
628 struct msghdr msg = { 0 };
629 size_t want, read = 0;
630 ssize_t ret = 0;
631
632 if (transport->recv.len == 0) {
633 want = xs_read_stream_headersize(transport->recv.copied != 0);
634 ret = xs_read_stream_header(transport, &msg, flags, want,
635 transport->recv.offset);
636 if (ret <= 0)
637 goto out_err;
638 transport->recv.offset = ret;
639 if (transport->recv.offset != want)
640 return transport->recv.offset;
641 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
642 RPC_FRAGMENT_SIZE_MASK;
643 transport->recv.offset -= sizeof(transport->recv.fraghdr);
644 read = ret;
645 }
646
647 switch (be32_to_cpu(transport->recv.calldir)) {
648 default:
649 msg.msg_flags |= MSG_TRUNC;
650 break;
651 case RPC_CALL:
652 ret = xs_read_stream_call(transport, &msg, flags);
653 break;
654 case RPC_REPLY:
655 ret = xs_read_stream_reply(transport, &msg, flags);
656 }
657 if (msg.msg_flags & MSG_TRUNC) {
658 transport->recv.calldir = cpu_to_be32(-1);
659 transport->recv.copied = -1;
660 }
661 if (ret < 0)
662 goto out_err;
663 read += ret;
664 if (transport->recv.offset < transport->recv.len) {
665 if (!(msg.msg_flags & MSG_TRUNC))
666 return read;
667 msg.msg_flags = 0;
668 ret = xs_read_discard(transport->sock, &msg, flags,
669 transport->recv.len - transport->recv.offset);
670 if (ret <= 0)
671 goto out_err;
672 transport->recv.offset += ret;
673 read += ret;
674 if (transport->recv.offset != transport->recv.len)
675 return read;
676 }
677 if (xs_read_stream_request_done(transport)) {
678 trace_xs_stream_read_request(transport);
679 transport->recv.copied = 0;
680 }
681 transport->recv.offset = 0;
682 transport->recv.len = 0;
683 return read;
684 out_err:
685 return ret != 0 ? ret : -ESHUTDOWN;
686 }
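/*
 * Added walkthrough (hypothetical sizes): for a reply carried in a
 * single 1000-byte fragment, xs_read_stream() first reads the
 * 12-byte header (marker + XID + call direction), takes recv.len
 * from the marker, then dispatches on the call direction: RPC_REPLY
 * for ordinary replies, RPC_CALL only for backchannel callbacks.
 * Whatever is left unread - or all of an unrecognized/truncated
 * record - is drained via xs_read_discard() so the stream stays
 * aligned on fragment boundaries for the next record.
 */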
687
688 static __poll_t xs_poll_socket(struct sock_xprt *transport)
689 {
690 return transport->sock->ops->poll(transport->file, transport->sock,
691 NULL);
692 }
693
694 static bool xs_poll_socket_readable(struct sock_xprt *transport)
695 {
696 __poll_t events = xs_poll_socket(transport);
697
698 return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
699 }
700
701 static void xs_poll_check_readable(struct sock_xprt *transport)
702 {
703
704 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
705 if (!xs_poll_socket_readable(transport))
706 return;
707 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
708 queue_work(xprtiod_workqueue, &transport->recv_worker);
709 }
710
711 static void xs_stream_data_receive(struct sock_xprt *transport)
712 {
713 size_t read = 0;
714 ssize_t ret = 0;
715
716 mutex_lock(&transport->recv_mutex);
717 if (transport->sock == NULL)
718 goto out;
719 for (;;) {
720 ret = xs_read_stream(transport, MSG_DONTWAIT);
721 if (ret < 0)
722 break;
723 read += ret;
724 cond_resched();
725 }
726 if (ret == -ESHUTDOWN)
727 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
728 else
729 xs_poll_check_readable(transport);
730 out:
731 mutex_unlock(&transport->recv_mutex);
732 trace_xs_stream_read_data(&transport->xprt, ret, read);
733 }
734
735 static void xs_stream_data_receive_workfn(struct work_struct *work)
736 {
737 struct sock_xprt *transport =
738 container_of(work, struct sock_xprt, recv_worker);
739 unsigned int pflags = memalloc_nofs_save();
740
741 xs_stream_data_receive(transport);
742 memalloc_nofs_restore(pflags);
743 }
744
745 static void
746 xs_stream_reset_connect(struct sock_xprt *transport)
747 {
748 transport->recv.offset = 0;
749 transport->recv.len = 0;
750 transport->recv.copied = 0;
751 transport->xmit.offset = 0;
752 }
753
754 static void
755 xs_stream_start_connect(struct sock_xprt *transport)
756 {
757 transport->xprt.stat.connect_count++;
758 transport->xprt.stat.connect_start = jiffies;
759 }
760
761 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
762
763 /**
764 * xs_nospace - handle transmit was incomplete
765 * @req: pointer to RPC request
766 *
767 */
768 static int xs_nospace(struct rpc_rqst *req)
769 {
770 struct rpc_xprt *xprt = req->rq_xprt;
771 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
772 struct sock *sk = transport->inet;
773 int ret = -EAGAIN;
774
775 trace_rpc_socket_nospace(req, transport);
776
777 /* Protect against races with write_space */
778 spin_lock(&xprt->transport_lock);
779
780 /* Don't race with disconnect */
781 if (xprt_connected(xprt)) {
782 /* wait for more buffer space */
783 sk->sk_write_pending++;
784 xprt_wait_for_buffer_space(xprt);
785 } else
786 ret = -ENOTCONN;
787
788 spin_unlock(&xprt->transport_lock);
789
790 /* Race breaker in case memory is freed before above code is called */
791 if (ret == -EAGAIN) {
792 struct socket_wq *wq;
793
794 rcu_read_lock();
795 wq = rcu_dereference(sk->sk_wq);
796 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
797 rcu_read_unlock();
798
799 sk->sk_write_space(sk);
800 }
801 return ret;
802 }
803
804 static void
805 xs_stream_prepare_request(struct rpc_rqst *req)
806 {
807 xdr_free_bvec(&req->rq_rcv_buf);
808 req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
809 }
810
811 /*
812 * Determine if the previous message in the stream was aborted before it
813 * could complete transmission.
814 */
815 static bool
816 xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
817 {
818 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
819 }
820
821 /*
822 * Return the stream record marker field for a record of length < 2^31-1
823 */
824 static rpc_fraghdr
825 xs_stream_record_marker(struct xdr_buf *xdr)
826 {
827 if (!xdr->len)
828 return 0;
829 return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
830 }
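/*
 * Added sketch: this is the standard RPC-over-TCP record marking
 * (RFC 5531) - a 4-byte big-endian word whose top bit flags the last
 * fragment and whose low 31 bits carry the fragment length.
 * Decoding one, mirroring the receive path:
 *
 *	u32 marker = be32_to_cpu(fraghdr);
 *	bool last  = marker & RPC_LAST_STREAM_FRAGMENT;
 *	u32 len    = marker & RPC_FRAGMENT_SIZE_MASK;
 *
 * A 200-byte single-fragment request is therefore framed as
 * 0x800000c8.
 */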
831
832 /**
833 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
834 * @req: pointer to RPC request
835 *
836 * Return values:
837 * 0: The request has been sent
838 * EAGAIN: The socket was blocked, please call again later to
839 * complete the request
840 * ENOTCONN: Caller needs to invoke connect logic then call again
841 * other: Some other error occurred, the request was not sent
842 */
843 static int xs_local_send_request(struct rpc_rqst *req)
844 {
845 struct rpc_xprt *xprt = req->rq_xprt;
846 struct sock_xprt *transport =
847 container_of(xprt, struct sock_xprt, xprt);
848 struct xdr_buf *xdr = &req->rq_snd_buf;
849 rpc_fraghdr rm = xs_stream_record_marker(xdr);
850 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
851 struct msghdr msg = {
852 .msg_flags = XS_SENDMSG_FLAGS,
853 };
854 unsigned int sent;
855 int status;
856
857 /* Close the stream if the previous transmission was incomplete */
858 if (xs_send_request_was_aborted(transport, req)) {
859 xs_close(xprt);
860 return -ENOTCONN;
861 }
862
863 xs_pktdump("packet data:",
864 req->rq_svec->iov_base, req->rq_svec->iov_len);
865
866 req->rq_xtime = ktime_get();
867 status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
868 transport->xmit.offset, rm, &sent);
869 dprintk("RPC: %s(%u) = %d\n",
870 __func__, xdr->len - transport->xmit.offset, status);
871
872 if (status == -EAGAIN && sock_writeable(transport->inet))
873 status = -ENOBUFS;
874
875 if (likely(sent > 0) || status == 0) {
876 transport->xmit.offset += sent;
877 req->rq_bytes_sent = transport->xmit.offset;
878 if (likely(req->rq_bytes_sent >= msglen)) {
879 req->rq_xmit_bytes_sent += transport->xmit.offset;
880 transport->xmit.offset = 0;
881 return 0;
882 }
883 status = -EAGAIN;
884 }
885
886 switch (status) {
887 case -ENOBUFS:
888 break;
889 case -EAGAIN:
890 status = xs_nospace(req);
891 break;
892 default:
893 dprintk("RPC: sendmsg returned unrecognized error %d\n",
894 -status);
895 fallthrough;
896 case -EPIPE:
897 xs_close(xprt);
898 status = -ENOTCONN;
899 }
900
901 return status;
902 }
903
904 /**
905 * xs_udp_send_request - write an RPC request to a UDP socket
906 * @req: pointer to RPC request
907 *
908 * Return values:
909 * 0: The request has been sent
910 * EAGAIN: The socket was blocked, please call again later to
911 * complete the request
912 * ENOTCONN: Caller needs to invoke connect logic then call again
913 * other: Some other error occurred, the request was not sent
914 */
915 static int xs_udp_send_request(struct rpc_rqst *req)
916 {
917 struct rpc_xprt *xprt = req->rq_xprt;
918 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
919 struct xdr_buf *xdr = &req->rq_snd_buf;
920 struct msghdr msg = {
921 .msg_name = xs_addr(xprt),
922 .msg_namelen = xprt->addrlen,
923 .msg_flags = XS_SENDMSG_FLAGS,
924 };
925 unsigned int sent;
926 int status;
927
928 xs_pktdump("packet data:",
929 req->rq_svec->iov_base,
930 req->rq_svec->iov_len);
931
932 if (!xprt_bound(xprt))
933 return -ENOTCONN;
934
935 if (!xprt_request_get_cong(xprt, req))
936 return -EBADSLT;
937
938 req->rq_xtime = ktime_get();
939 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
940
941 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
942 xdr->len, status);
943
944 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
945 if (status == -EPERM)
946 goto process_status;
947
948 if (status == -EAGAIN && sock_writeable(transport->inet))
949 status = -ENOBUFS;
950
951 if (sent > 0 || status == 0) {
952 req->rq_xmit_bytes_sent += sent;
953 if (sent >= req->rq_slen)
954 return 0;
955 /* Still some bytes left; set up for a retry later. */
956 status = -EAGAIN;
957 }
958
959 process_status:
960 switch (status) {
961 case -ENOTSOCK:
962 status = -ENOTCONN;
963 /* Should we call xs_close() here? */
964 break;
965 case -EAGAIN:
966 status = xs_nospace(req);
967 break;
968 case -ENETUNREACH:
969 case -ENOBUFS:
970 case -EPIPE:
971 case -ECONNREFUSED:
972 case -EPERM:
973 /* When the server has died, an ICMP port unreachable message
974 * prompts ECONNREFUSED. */
975 break;
976 default:
977 dprintk("RPC: sendmsg returned unrecognized error %d\n",
978 -status);
979 }
980
981 return status;
982 }
983
984 /**
985 * xs_tcp_send_request - write an RPC request to a TCP socket
986 * @req: pointer to RPC request
987 *
988 * Return values:
989 * 0: The request has been sent
990 * EAGAIN: The socket was blocked, please call again later to
991 * complete the request
992 * ENOTCONN: Caller needs to invoke connect logic then call again
993 * other: Some other error occurred, the request was not sent
994 *
995 * XXX: In the case of soft timeouts, should we eventually give up
996 * if sendmsg is not able to make progress?
997 */
998 static int xs_tcp_send_request(struct rpc_rqst *req)
999 {
1000 struct rpc_xprt *xprt = req->rq_xprt;
1001 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1002 struct xdr_buf *xdr = &req->rq_snd_buf;
1003 rpc_fraghdr rm = xs_stream_record_marker(xdr);
1004 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
1005 struct msghdr msg = {
1006 .msg_flags = XS_SENDMSG_FLAGS,
1007 };
1008 bool vm_wait = false;
1009 unsigned int sent;
1010 int status;
1011
1012 /* Close the stream if the previous transmission was incomplete */
1013 if (xs_send_request_was_aborted(transport, req)) {
1014 if (transport->sock != NULL)
1015 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1016 return -ENOTCONN;
1017 }
1018 if (!transport->inet)
1019 return -ENOTCONN;
1020
1021 xs_pktdump("packet data:",
1022 req->rq_svec->iov_base,
1023 req->rq_svec->iov_len);
1024
1025 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1026 xs_tcp_set_socket_timeouts(xprt, transport->sock);
1027
1028 /* Continue transmitting the packet/record. We must be careful
1029 * to cope with writespace callbacks arriving _after_ we have
1030 * called sendmsg(). */
1031 req->rq_xtime = ktime_get();
1032 tcp_sock_set_cork(transport->inet, true);
1033 while (1) {
1034 status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
1035 transport->xmit.offset, rm, &sent);
1036
1037 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
1038 xdr->len - transport->xmit.offset, status);
1039
1040 /* If we've sent the entire packet, immediately
1041 * reset the count of bytes sent. */
1042 transport->xmit.offset += sent;
1043 req->rq_bytes_sent = transport->xmit.offset;
1044 if (likely(req->rq_bytes_sent >= msglen)) {
1045 req->rq_xmit_bytes_sent += transport->xmit.offset;
1046 transport->xmit.offset = 0;
1047 if (atomic_long_read(&xprt->xmit_queuelen) == 1)
1048 tcp_sock_set_cork(transport->inet, false);
1049 return 0;
1050 }
1051
1052 WARN_ON_ONCE(sent == 0 && status == 0);
1053
1054 if (status == -EAGAIN) {
1055 /*
1056 * Return EAGAIN if we're sure we're hitting the
1057 * socket send buffer limits.
1058 */
1059 if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
1060 break;
1061 /*
1062 * Did we hit a memory allocation failure?
1063 */
1064 if (sent == 0) {
1065 status = -ENOBUFS;
1066 if (vm_wait)
1067 break;
1068 /* Retry, knowing now that we're below the
1069 * socket send buffer limit
1070 */
1071 vm_wait = true;
1072 }
1073 continue;
1074 }
1075 if (status < 0)
1076 break;
1077 vm_wait = false;
1078 }
1079
1080 switch (status) {
1081 case -ENOTSOCK:
1082 status = -ENOTCONN;
1083 /* Should we call xs_close() here? */
1084 break;
1085 case -EAGAIN:
1086 status = xs_nospace(req);
1087 break;
1088 case -ECONNRESET:
1089 case -ECONNREFUSED:
1090 case -ENOTCONN:
1091 case -EADDRINUSE:
1092 case -ENOBUFS:
1093 case -EPIPE:
1094 break;
1095 default:
1096 dprintk("RPC: sendmsg returned unrecognized error %d\n",
1097 -status);
1098 }
1099
1100 return status;
1101 }
1102
1103 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1104 {
1105 transport->old_data_ready = sk->sk_data_ready;
1106 transport->old_state_change = sk->sk_state_change;
1107 transport->old_write_space = sk->sk_write_space;
1108 transport->old_error_report = sk->sk_error_report;
1109 }
1110
1111 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1112 {
1113 sk->sk_data_ready = transport->old_data_ready;
1114 sk->sk_state_change = transport->old_state_change;
1115 sk->sk_write_space = transport->old_write_space;
1116 sk->sk_error_report = transport->old_error_report;
1117 }
1118
1119 static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1120 {
1121 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1122
1123 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1124 clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
1125 clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
1126 clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
1127 }
1128
1129 static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
1130 {
1131 set_bit(nr, &transport->sock_state);
1132 queue_work(xprtiod_workqueue, &transport->error_worker);
1133 }
1134
1135 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1136 {
1137 smp_mb__before_atomic();
1138 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1139 clear_bit(XPRT_CLOSING, &xprt->state);
1140 xs_sock_reset_state_flags(xprt);
1141 smp_mb__after_atomic();
1142 }
1143
1144 /**
1145 * xs_error_report - callback to handle TCP socket state errors
1146 * @sk: socket
1147 *
1148 * Note: we don't call sock_error() since there may be a rpc_task
1149 * using the socket, and so we don't want to clear sk->sk_err.
1150 */
1151 static void xs_error_report(struct sock *sk)
1152 {
1153 struct sock_xprt *transport;
1154 struct rpc_xprt *xprt;
1155
1156 read_lock_bh(&sk->sk_callback_lock);
1157 if (!(xprt = xprt_from_sock(sk)))
1158 goto out;
1159
1160 transport = container_of(xprt, struct sock_xprt, xprt);
1161 transport->xprt_err = -sk->sk_err;
1162 if (transport->xprt_err == 0)
1163 goto out;
1164 dprintk("RPC: xs_error_report client %p, error=%d...\n",
1165 xprt, -transport->xprt_err);
1166 trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
1167
1168 /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
1169 smp_mb__before_atomic();
1170 xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
1171 out:
1172 read_unlock_bh(&sk->sk_callback_lock);
1173 }
1174
1175 static void xs_reset_transport(struct sock_xprt *transport)
1176 {
1177 struct socket *sock = transport->sock;
1178 struct sock *sk = transport->inet;
1179 struct rpc_xprt *xprt = &transport->xprt;
1180 struct file *filp = transport->file;
1181
1182 if (sk == NULL)
1183 return;
1184
1185 if (atomic_read(&transport->xprt.swapper))
1186 sk_clear_memalloc(sk);
1187
1188 kernel_sock_shutdown(sock, SHUT_RDWR);
1189
1190 mutex_lock(&transport->recv_mutex);
1191 write_lock_bh(&sk->sk_callback_lock);
1192 transport->inet = NULL;
1193 transport->sock = NULL;
1194 transport->file = NULL;
1195
1196 sk->sk_user_data = NULL;
1197
1198 xs_restore_old_callbacks(transport, sk);
1199 xprt_clear_connected(xprt);
1200 write_unlock_bh(&sk->sk_callback_lock);
1201 xs_sock_reset_connection_flags(xprt);
1202 /* Reset stream record info */
1203 xs_stream_reset_connect(transport);
1204 mutex_unlock(&transport->recv_mutex);
1205
1206 trace_rpc_socket_close(xprt, sock);
1207 fput(filp);
1208
1209 xprt_disconnect_done(xprt);
1210 }
1211
1212 /**
1213 * xs_close - close a socket
1214 * @xprt: transport
1215 *
1216 * This is used when all requests are complete; ie, no DRC state remains
1217 * on the server we want to save.
1218 *
1219 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1220 * xs_reset_transport() zeroing the socket from underneath a writer.
1221 */
1222 static void xs_close(struct rpc_xprt *xprt)
1223 {
1224 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1225
1226 dprintk("RPC: xs_close xprt %p\n", xprt);
1227
1228 xs_reset_transport(transport);
1229 xprt->reestablish_timeout = 0;
1230 }
1231
1232 static void xs_inject_disconnect(struct rpc_xprt *xprt)
1233 {
1234 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
1235 xprt);
1236 xprt_disconnect_done(xprt);
1237 }
1238
1239 static void xs_xprt_free(struct rpc_xprt *xprt)
1240 {
1241 xs_free_peer_addresses(xprt);
1242 xprt_free(xprt);
1243 }
1244
1245 /**
1246 * xs_destroy - prepare to shutdown a transport
1247 * @xprt: doomed transport
1248 *
1249 */
1250 static void xs_destroy(struct rpc_xprt *xprt)
1251 {
1252 struct sock_xprt *transport = container_of(xprt,
1253 struct sock_xprt, xprt);
1254 dprintk("RPC: xs_destroy xprt %p\n", xprt);
1255
1256 cancel_delayed_work_sync(&transport->connect_worker);
1257 xs_close(xprt);
1258 cancel_work_sync(&transport->recv_worker);
1259 cancel_work_sync(&transport->error_worker);
1260 xs_xprt_free(xprt);
1261 module_put(THIS_MODULE);
1262 }
1263
1264 /**
1265 * xs_udp_data_read_skb - receive callback for UDP sockets
1266 * @xprt: transport
1267 * @sk: socket
1268 * @skb: skbuff
1269 *
1270 */
1271 static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1272 struct sock *sk,
1273 struct sk_buff *skb)
1274 {
1275 struct rpc_task *task;
1276 struct rpc_rqst *rovr;
1277 int repsize, copied;
1278 u32 _xid;
1279 __be32 *xp;
1280
1281 repsize = skb->len;
1282 if (repsize < 4) {
1283 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1284 return;
1285 }
1286
1287 /* Copy the XID from the skb... */
1288 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1289 if (xp == NULL)
1290 return;
1291
1292 /* Look up and lock the request corresponding to the given XID */
1293 spin_lock(&xprt->queue_lock);
1294 rovr = xprt_lookup_rqst(xprt, *xp);
1295 if (!rovr)
1296 goto out_unlock;
1297 xprt_pin_rqst(rovr);
1298 xprt_update_rtt(rovr->rq_task);
1299 spin_unlock(&xprt->queue_lock);
1300 task = rovr->rq_task;
1301
1302 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1303 copied = repsize;
1304
1305 /* Suck it into the iovec, verify checksum if not done by hw. */
1306 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1307 spin_lock(&xprt->queue_lock);
1308 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1309 goto out_unpin;
1310 }
1311
1312
1313 spin_lock(&xprt->transport_lock);
1314 xprt_adjust_cwnd(xprt, task, copied);
1315 spin_unlock(&xprt->transport_lock);
1316 spin_lock(&xprt->queue_lock);
1317 xprt_complete_rqst(task, copied);
1318 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1319 out_unpin:
1320 xprt_unpin_rqst(rovr);
1321 out_unlock:
1322 spin_unlock(&xprt->queue_lock);
1323 }
1324
1325 static void xs_udp_data_receive(struct sock_xprt *transport)
1326 {
1327 struct sk_buff *skb;
1328 struct sock *sk;
1329 int err;
1330
1331 mutex_lock(&transport->recv_mutex);
1332 sk = transport->inet;
1333 if (sk == NULL)
1334 goto out;
1335 for (;;) {
1336 skb = skb_recv_udp(sk, 0, 1, &err);
1337 if (skb == NULL)
1338 break;
1339 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1340 consume_skb(skb);
1341 cond_resched();
1342 }
1343 xs_poll_check_readable(transport);
1344 out:
1345 mutex_unlock(&transport->recv_mutex);
1346 }
1347
1348 static void xs_udp_data_receive_workfn(struct work_struct *work)
1349 {
1350 struct sock_xprt *transport =
1351 container_of(work, struct sock_xprt, recv_worker);
1352 unsigned int pflags = memalloc_nofs_save();
1353
1354 xs_udp_data_receive(transport);
1355 memalloc_nofs_restore(pflags);
1356 }
1357
1358 /**
1359 * xs_data_ready - "data ready" callback for UDP sockets
1360 * @sk: socket with data to read
1361 *
1362 */
1363 static void xs_data_ready(struct sock *sk)
1364 {
1365 struct rpc_xprt *xprt;
1366
1367 read_lock_bh(&sk->sk_callback_lock);
1368 dprintk("RPC: xs_data_ready...\n");
1369 xprt = xprt_from_sock(sk);
1370 if (xprt != NULL) {
1371 struct sock_xprt *transport = container_of(xprt,
1372 struct sock_xprt, xprt);
1373 transport->old_data_ready(sk);
1374 /* Any data means we had a useful conversation, so
1375 * then we don't need to delay the next reconnect
1376 */
1377 if (xprt->reestablish_timeout)
1378 xprt->reestablish_timeout = 0;
1379 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1380 queue_work(xprtiod_workqueue, &transport->recv_worker);
1381 }
1382 read_unlock_bh(&sk->sk_callback_lock);
1383 }
1384
1385 /*
1386 * Helper function to force a TCP close if the server is sending
1387 * junk and/or it has put us in CLOSE_WAIT
1388 */
1389 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1390 {
1391 xprt_force_disconnect(xprt);
1392 }
1393
1394 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1395 static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1396 {
1397 return PAGE_SIZE;
1398 }
1399 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1400
1401 /**
1402 * xs_tcp_state_change - callback to handle TCP socket state changes
1403 * @sk: socket whose state has changed
1404 *
1405 */
1406 static void xs_tcp_state_change(struct sock *sk)
1407 {
1408 struct rpc_xprt *xprt;
1409 struct sock_xprt *transport;
1410
1411 read_lock_bh(&sk->sk_callback_lock);
1412 if (!(xprt = xprt_from_sock(sk)))
1413 goto out;
1414 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1415 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1416 sk->sk_state, xprt_connected(xprt),
1417 sock_flag(sk, SOCK_DEAD),
1418 sock_flag(sk, SOCK_ZAPPED),
1419 sk->sk_shutdown);
1420
1421 transport = container_of(xprt, struct sock_xprt, xprt);
1422 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1423 switch (sk->sk_state) {
1424 case TCP_ESTABLISHED:
1425 if (!xprt_test_and_set_connected(xprt)) {
1426 xprt->connect_cookie++;
1427 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1428 xprt_clear_connecting(xprt);
1429
1430 xprt->stat.connect_count++;
1431 xprt->stat.connect_time += (long)jiffies -
1432 xprt->stat.connect_start;
1433 xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
1434 }
1435 break;
1436 case TCP_FIN_WAIT1:
1437 /* The client initiated a shutdown of the socket */
1438 xprt->connect_cookie++;
1439 xprt->reestablish_timeout = 0;
1440 set_bit(XPRT_CLOSING, &xprt->state);
1441 smp_mb__before_atomic();
1442 clear_bit(XPRT_CONNECTED, &xprt->state);
1443 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1444 smp_mb__after_atomic();
1445 break;
1446 case TCP_CLOSE_WAIT:
1447 /* The server initiated a shutdown of the socket */
1448 xprt->connect_cookie++;
1449 clear_bit(XPRT_CONNECTED, &xprt->state);
1450 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1451 fallthrough;
1452 case TCP_CLOSING:
1453 /*
1454 * If the server closed down the connection, make sure that
1455 * we back off before reconnecting
1456 */
1457 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1458 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1459 break;
1460 case TCP_LAST_ACK:
1461 set_bit(XPRT_CLOSING, &xprt->state);
1462 smp_mb__before_atomic();
1463 clear_bit(XPRT_CONNECTED, &xprt->state);
1464 smp_mb__after_atomic();
1465 break;
1466 case TCP_CLOSE:
1467 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1468 &transport->sock_state))
1469 xprt_clear_connecting(xprt);
1470 clear_bit(XPRT_CLOSING, &xprt->state);
1471 /* Trigger the socket release */
1472 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1473 }
1474 out:
1475 read_unlock_bh(&sk->sk_callback_lock);
1476 }
1477
1478 static void xs_write_space(struct sock *sk)
1479 {
1480 struct socket_wq *wq;
1481 struct sock_xprt *transport;
1482 struct rpc_xprt *xprt;
1483
1484 if (!sk->sk_socket)
1485 return;
1486 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1487
1488 if (unlikely(!(xprt = xprt_from_sock(sk))))
1489 return;
1490 transport = container_of(xprt, struct sock_xprt, xprt);
1491 rcu_read_lock();
1492 wq = rcu_dereference(sk->sk_wq);
1493 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1494 goto out;
1495
1496 xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
1497 sk->sk_write_pending--;
1498 out:
1499 rcu_read_unlock();
1500 }
1501
1502 /**
1503 * xs_udp_write_space - callback invoked when socket buffer space
1504 * becomes available
1505 * @sk: socket whose state has changed
1506 *
1507 * Called when more output buffer space is available for this socket.
1508 * We try not to wake our writers until they can make "significant"
1509 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1510 * with a bunch of small requests.
1511 */
1512 static void xs_udp_write_space(struct sock *sk)
1513 {
1514 read_lock_bh(&sk->sk_callback_lock);
1515
1516 /* from net/core/sock.c:sock_def_write_space */
1517 if (sock_writeable(sk))
1518 xs_write_space(sk);
1519
1520 read_unlock_bh(&sk->sk_callback_lock);
1521 }
1522
1523 /**
1524 * xs_tcp_write_space - callback invoked when socket buffer space
1525 * becomes available
1526 * @sk: socket whose state has changed
1527 *
1528 * Called when more output buffer space is available for this socket.
1529 * We try not to wake our writers until they can make "significant"
1530 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1531 * with a bunch of small requests.
1532 */
1533 static void xs_tcp_write_space(struct sock *sk)
1534 {
1535 read_lock_bh(&sk->sk_callback_lock);
1536
1537 /* from net/core/stream.c:sk_stream_write_space */
1538 if (sk_stream_is_writeable(sk))
1539 xs_write_space(sk);
1540
1541 read_unlock_bh(&sk->sk_callback_lock);
1542 }
1543
1544 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1545 {
1546 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1547 struct sock *sk = transport->inet;
1548
1549 if (transport->rcvsize) {
1550 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1551 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1552 }
1553 if (transport->sndsize) {
1554 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1555 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1556 sk->sk_write_space(sk);
1557 }
1558 }
1559
1560 /**
1561 * xs_udp_set_buffer_size - set send and receive limits
1562 * @xprt: generic transport
1563 * @sndsize: requested size of send buffer, in bytes
1564 * @rcvsize: requested size of receive buffer, in bytes
1565 *
1566 * Set socket send and receive buffer size limits.
1567 */
1568 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1569 {
1570 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1571
1572 transport->sndsize = 0;
1573 if (sndsize)
1574 transport->sndsize = sndsize + 1024;
1575 transport->rcvsize = 0;
1576 if (rcvsize)
1577 transport->rcvsize = rcvsize + 1024;
1578
1579 xs_udp_do_set_buffer_size(xprt);
1580 }
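/*
 * Added example (hypothetical numbers): with sndsize = 4096 and
 * xprt->max_reqs = 16, xs_udp_do_set_buffer_size() requests
 *
 *	sk_sndbuf = (4096 + 1024) * 16 * 2 = 163840 bytes
 *
 * The 1024 bytes of slack leave room for RPC headers, and
 * SOCK_SNDBUF_LOCK / SOCK_RCVBUF_LOCK keep the socket autotuning
 * from overriding the explicit sizes.
 */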
1581
1582 /**
1583 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1584 * @xprt: controlling transport
1585 * @task: task that timed out
1586 *
1587 * Adjust the congestion window after a retransmit timeout has occurred.
1588 */
1589 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1590 {
1591 spin_lock(&xprt->transport_lock);
1592 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1593 spin_unlock(&xprt->transport_lock);
1594 }
1595
1596 static int xs_get_random_port(void)
1597 {
1598 unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1599 unsigned short range;
1600 unsigned short rand;
1601
1602 if (max < min)
1603 return -EADDRINUSE;
1604 range = max - min + 1;
1605 rand = (unsigned short) prandom_u32() % range;
1606 return rand + min;
1607 }
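/*
 * Added example: with the default bounds (min = 665, max = 1023) the
 * range is 359, so the result is a roughly uniform pick from
 * [665, 1023]. If an administrator inverts the sysctls so that
 * max < min, the function deliberately fails with -EADDRINUSE rather
 * than bind outside the configured window.
 */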
1608
1609 static unsigned short xs_sock_getport(struct socket *sock)
1610 {
1611 struct sockaddr_storage buf;
1612 unsigned short port = 0;
1613
1614 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1615 goto out;
1616 switch (buf.ss_family) {
1617 case AF_INET6:
1618 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1619 break;
1620 case AF_INET:
1621 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1622 }
1623 out:
1624 return port;
1625 }
1626
1627 /**
1628 * xs_set_port - reset the port number in the remote endpoint address
1629 * @xprt: generic transport
1630 * @port: new port number
1631 *
1632 */
1633 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1634 {
1635 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1636
1637 rpc_set_port(xs_addr(xprt), port);
1638 xs_update_peer_port(xprt);
1639 }
1640
1641 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1642 {
1643 if (transport->srcport == 0 && transport->xprt.reuseport)
1644 transport->srcport = xs_sock_getport(sock);
1645 }
1646
1647 static int xs_get_srcport(struct sock_xprt *transport)
1648 {
1649 int port = transport->srcport;
1650
1651 if (port == 0 && transport->xprt.resvport)
1652 port = xs_get_random_port();
1653 return port;
1654 }
1655
1656 unsigned short get_srcport(struct rpc_xprt *xprt)
1657 {
1658 struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
1659 return xs_sock_getport(sock->sock);
1660 }
1661 EXPORT_SYMBOL(get_srcport);
1662
1663 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1664 {
1665 if (transport->srcport != 0)
1666 transport->srcport = 0;
1667 if (!transport->xprt.resvport)
1668 return 0;
1669 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1670 return xprt_max_resvport;
1671 return --port;
1672 }
1673 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1674 {
1675 struct sockaddr_storage myaddr;
1676 int err, nloop = 0;
1677 int port = xs_get_srcport(transport);
1678 unsigned short last;
1679
1680 /*
1681 * If we are asking for any ephemeral port (i.e. port == 0 &&
1682 * transport->xprt.resvport == 0), don't bind. Let the local
1683 * port selection happen implicitly when the socket is used
1684 * (for example at connect time).
1685 *
1686 * This ensures that we can continue to establish TCP
1687 * connections even when all local ephemeral ports are already
1688 * a part of some TCP connection. This makes no difference
1689 * for UDP sockets, but also doesn't harm them.
1690 *
1691 * If we're asking for any reserved port (i.e. port == 0 &&
1692 * transport->xprt.resvport == 1) xs_get_srcport above will
1693 * ensure that port is non-zero and we will bind as needed.
1694 */
1695 if (port <= 0)
1696 return port;
1697
1698 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1699 do {
1700 rpc_set_port((struct sockaddr *)&myaddr, port);
1701 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1702 transport->xprt.addrlen);
1703 if (err == 0) {
1704 if (transport->xprt.reuseport)
1705 transport->srcport = port;
1706 break;
1707 }
1708 last = port;
1709 port = xs_next_srcport(transport, port);
1710 if (port > last)
1711 nloop++;
1712 } while (err == -EADDRINUSE && nloop != 2);
1713
1714 if (myaddr.ss_family == AF_INET)
1715 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1716 &((struct sockaddr_in *)&myaddr)->sin_addr,
1717 port, err ? "failed" : "ok", err);
1718 else
1719 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1720 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1721 port, err ? "failed" : "ok", err);
1722 return err;
1723 }
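/*
 * Added walkthrough (hypothetical ports): suppose the random pick is
 * 800 and ports 800 and 799 are taken. The loop binds at 800
 * (-EADDRINUSE), steps down to 799 via xs_next_srcport()
 * (-EADDRINUSE again), then succeeds at 798. When the descent
 * reaches xprt_min_resvport it wraps back to xprt_max_resvport; the
 * "port > last" test counts those wraparounds, and after the second
 * one every reserved port has been tried at least once, so the bind
 * gives up.
 */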
1724
1725 /*
1726 * We don't support autobind on AF_LOCAL sockets
1727 */
1728 static void xs_local_rpcbind(struct rpc_task *task)
1729 {
1730 xprt_set_bound(task->tk_xprt);
1731 }
1732
1733 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1734 {
1735 }
1736
1737 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1738 static struct lock_class_key xs_key[2];
1739 static struct lock_class_key xs_slock_key[2];
1740
1741 static inline void xs_reclassify_socketu(struct socket *sock)
1742 {
1743 struct sock *sk = sock->sk;
1744
1745 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1746 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1747 }
1748
1749 static inline void xs_reclassify_socket4(struct socket *sock)
1750 {
1751 struct sock *sk = sock->sk;
1752
1753 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1754 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1755 }
1756
1757 static inline void xs_reclassify_socket6(struct socket *sock)
1758 {
1759 struct sock *sk = sock->sk;
1760
1761 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1762 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1763 }
1764
1765 static inline void xs_reclassify_socket(int family, struct socket *sock)
1766 {
1767 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1768 return;
1769
1770 switch (family) {
1771 case AF_LOCAL:
1772 xs_reclassify_socketu(sock);
1773 break;
1774 case AF_INET:
1775 xs_reclassify_socket4(sock);
1776 break;
1777 case AF_INET6:
1778 xs_reclassify_socket6(sock);
1779 break;
1780 }
1781 }
1782 #else
1783 static inline void xs_reclassify_socket(int family, struct socket *sock)
1784 {
1785 }
1786 #endif
1787
1788 static void xs_dummy_setup_socket(struct work_struct *work)
1789 {
1790 }
1791
1792 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1793 struct sock_xprt *transport, int family, int type,
1794 int protocol, bool reuseport)
1795 {
1796 struct file *filp;
1797 struct socket *sock;
1798 int err;
1799
1800 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1801 if (err < 0) {
1802 dprintk("RPC: can't create %d transport socket (%d).\n",
1803 protocol, -err);
1804 goto out;
1805 }
1806 xs_reclassify_socket(family, sock);
1807
1808 if (reuseport)
1809 sock_set_reuseport(sock->sk);
1810
1811 err = xs_bind(transport, sock);
1812 if (err) {
1813 sock_release(sock);
1814 goto out;
1815 }
1816
1817 filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1818 if (IS_ERR(filp))
1819 return ERR_CAST(filp);
1820 transport->file = filp;
1821
1822 return sock;
1823 out:
1824 return ERR_PTR(err);
1825 }
1826
1827 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1828 struct socket *sock)
1829 {
1830 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1831 xprt);
1832
1833 if (!transport->inet) {
1834 struct sock *sk = sock->sk;
1835
1836 write_lock_bh(&sk->sk_callback_lock);
1837
1838 xs_save_old_callbacks(transport, sk);
1839
1840 sk->sk_user_data = xprt;
1841 sk->sk_data_ready = xs_data_ready;
1842 sk->sk_write_space = xs_udp_write_space;
1843 sock_set_flag(sk, SOCK_FASYNC);
1844 sk->sk_error_report = xs_error_report;
1845
1846 xprt_clear_connected(xprt);
1847
1848 /* Reset to new socket */
1849 transport->sock = sock;
1850 transport->inet = sk;
1851
1852 write_unlock_bh(&sk->sk_callback_lock);
1853 }
1854
1855 xs_stream_start_connect(transport);
1856
1857 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1858 }
1859
1860 /**
1861 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1862 * @transport: socket transport to connect
1863 */
1864 static int xs_local_setup_socket(struct sock_xprt *transport)
1865 {
1866 struct rpc_xprt *xprt = &transport->xprt;
1867 struct file *filp;
1868 struct socket *sock;
1869 int status;
1870
1871 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1872 SOCK_STREAM, 0, &sock, 1);
1873 if (status < 0) {
1874 dprintk("RPC: can't create AF_LOCAL "
1875 "transport socket (%d).\n", -status);
1876 goto out;
1877 }
1878 xs_reclassify_socket(AF_LOCAL, sock);
1879
1880 filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1881 if (IS_ERR(filp)) {
1882 status = PTR_ERR(filp);
1883 goto out;
1884 }
1885 transport->file = filp;
1886
1887 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1888 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1889
1890 status = xs_local_finish_connecting(xprt, sock);
1891 trace_rpc_socket_connect(xprt, sock, status);
1892 switch (status) {
1893 case 0:
1894 dprintk("RPC: xprt %p connected to %s\n",
1895 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1896 xprt->stat.connect_count++;
1897 xprt->stat.connect_time += (long)jiffies -
1898 xprt->stat.connect_start;
1899 xprt_set_connected(xprt);
1900 break;
1901 case -ENOBUFS:
1902 break;
1903 case -ENOENT:
1904 dprintk("RPC: xprt %p: socket %s does not exist\n",
1905 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1906 break;
1907 case -ECONNREFUSED:
1908 dprintk("RPC: xprt %p: connection refused for %s\n",
1909 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1910 break;
1911 default:
1912 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1913 __func__, -status,
1914 xprt->address_strings[RPC_DISPLAY_ADDR]);
1915 }
1916
1917 out:
1918 xprt_clear_connecting(xprt);
1919 xprt_wake_pending_tasks(xprt, status);
1920 return status;
1921 }
1922
1923 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1924 {
1925 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1926 int ret;
1927
1928 if (RPC_IS_ASYNC(task)) {
1929 /*
1930 * We want the AF_LOCAL connect to be resolved in the
1931 * filesystem namespace of the process making the rpc
1932 * call. Thus we connect synchronously.
1933 *
1934 * If we want to support asynchronous AF_LOCAL calls,
1935 * we'll need to figure out how to pass a namespace to
1936 * connect.
1937 */
1938 task->tk_rpc_status = -ENOTCONN;
1939 rpc_exit(task, -ENOTCONN);
1940 return;
1941 }
1942 ret = xs_local_setup_socket(transport);
1943 if (ret && !RPC_IS_SOFTCONN(task))
1944 msleep_interruptible(15000);
1945 }
1946
1947 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1948 /*
1949 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
1950 * know that we have exclusive access to the socket), to guard against
1951 * races with xs_reset_transport.
1952 */
1953 static void xs_set_memalloc(struct rpc_xprt *xprt)
1954 {
1955 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1956 xprt);
1957
1958 /*
1959 * If there's no sock, then we have nothing to set. The
1960 * reconnecting process will get it for us.
1961 */
1962 if (!transport->inet)
1963 return;
1964 if (atomic_read(&xprt->swapper))
1965 sk_set_memalloc(transport->inet);
1966 }
1967
1968 /**
1969 * xs_enable_swap - Tag this transport as being used for swap.
1970 * @xprt: transport to tag
1971 *
1972 * Take a reference to this transport on behalf of the rpc_clnt, and
1973 * mark its socket as memalloc if this is the first such reference.
1974 */
1975 static int
1976 xs_enable_swap(struct rpc_xprt *xprt)
1977 {
1978 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
1979
1980 if (atomic_inc_return(&xprt->swapper) != 1)
1981 return 0;
1982 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
1983 return -ERESTARTSYS;
1984 if (xs->inet)
1985 sk_set_memalloc(xs->inet);
1986 xprt_release_xprt(xprt, NULL);
1987 return 0;
1988 }
1989
1990 /**
1991 * xs_disable_swap - Untag this transport as being used for swap.
1992 * @xprt: transport to untag
1993 *
1994 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
1995 * swapper refcount goes to 0, untag the socket as a memalloc socket.
1996 */
1997 static void
1998 xs_disable_swap(struct rpc_xprt *xprt)
1999 {
2000 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2001
2002 if (!atomic_dec_and_test(&xprt->swapper))
2003 return;
2004 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2005 return;
2006 if (xs->inet)
2007 sk_clear_memalloc(xs->inet);
2008 xprt_release_xprt(xprt, NULL);
2009 }
2010 #else
2011 static void xs_set_memalloc(struct rpc_xprt *xprt)
2012 {
2013 }
2014
2015 static int
2016 xs_enable_swap(struct rpc_xprt *xprt)
2017 {
2018 return -EINVAL;
2019 }
2020
2021 static void
2022 xs_disable_swap(struct rpc_xprt *xprt)
2023 {
2024 }
2025 #endif
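
/*
 * xs_enable_swap()/xs_disable_swap() follow the common "first reference
 * does the setup, last reference does the teardown" idiom. A distilled
 * sketch of the pattern, with hypothetical do_setup()/do_teardown()
 * helpers standing in for sk_set_memalloc()/sk_clear_memalloc():
 */
#if 0
static atomic_t users = ATOMIC_INIT(0);

static void get_feature(void)
{
	if (atomic_inc_return(&users) == 1)
		do_setup();		/* only the 0 -> 1 transition */
}

static void put_feature(void)
{
	if (atomic_dec_and_test(&users))
		do_teardown();		/* only the 1 -> 0 transition */
}
#endif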
2026
2027 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2028 {
2029 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2030
2031 if (!transport->inet) {
2032 struct sock *sk = sock->sk;
2033
2034 write_lock_bh(&sk->sk_callback_lock);
2035
2036 xs_save_old_callbacks(transport, sk);
2037
2038 sk->sk_user_data = xprt;
2039 sk->sk_data_ready = xs_data_ready;
2040 sk->sk_write_space = xs_udp_write_space;
2041 sock_set_flag(sk, SOCK_FASYNC);
2042
2043 xprt_set_connected(xprt);
2044
2045 /* Reset to new socket */
2046 transport->sock = sock;
2047 transport->inet = sk;
2048
2049 xs_set_memalloc(xprt);
2050
2051 write_unlock_bh(&sk->sk_callback_lock);
2052 }
2053 xs_udp_do_set_buffer_size(xprt);
2054
2055 xprt->stat.connect_start = jiffies;
2056 }
2057
2058 static void xs_udp_setup_socket(struct work_struct *work)
2059 {
2060 struct sock_xprt *transport =
2061 container_of(work, struct sock_xprt, connect_worker.work);
2062 struct rpc_xprt *xprt = &transport->xprt;
2063 struct socket *sock;
2064 int status = -EIO;
2065
2066 sock = xs_create_sock(xprt, transport,
2067 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2068 IPPROTO_UDP, false);
2069 if (IS_ERR(sock))
2070 goto out;
2071
2072 dprintk("RPC: worker connecting xprt %p via %s to "
2073 "%s (port %s)\n", xprt,
2074 xprt->address_strings[RPC_DISPLAY_PROTO],
2075 xprt->address_strings[RPC_DISPLAY_ADDR],
2076 xprt->address_strings[RPC_DISPLAY_PORT]);
2077
2078 xs_udp_finish_connecting(xprt, sock);
2079 trace_rpc_socket_connect(xprt, sock, 0);
2080 status = 0;
2081 out:
2082 xprt_clear_connecting(xprt);
2083 xprt_unlock_connect(xprt, transport);
2084 xprt_wake_pending_tasks(xprt, status);
2085 }
2086
2087 /**
2088 * xs_tcp_shutdown - gracefully shut down a TCP socket
2089 * @xprt: transport
2090 *
2091 * Initiates a graceful shutdown of the TCP socket by calling the
2092 * equivalent of shutdown(SHUT_RDWR).
2093 */
2094 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2095 {
2096 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2097 struct socket *sock = transport->sock;
2098 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2099
2100 if (sock == NULL)
2101 return;
2102 if (!xprt->reuseport) {
2103 xs_close(xprt);
2104 return;
2105 }
2106 switch (skst) {
2107 case TCP_FIN_WAIT1:
2108 case TCP_FIN_WAIT2:
2109 break;
2110 case TCP_ESTABLISHED:
2111 case TCP_CLOSE_WAIT:
2112 kernel_sock_shutdown(sock, SHUT_RDWR);
2113 trace_rpc_socket_shutdown(xprt, sock);
2114 break;
2115 default:
2116 xs_reset_transport(transport);
2117 }
2118 }
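
/*
 * A userspace sketch of graceful TCP teardown, for comparison with the
 * kernel_sock_shutdown(SHUT_RDWR) call above. A half-close with SHUT_WR
 * sends our FIN while still letting the peer's remaining data (and its
 * FIN) be read; xs_tcp_shutdown() uses SHUT_RDWR because the transport
 * no longer expects traffic in either direction. The fd is hypothetical.
 */
#if 0
static void graceful_close_example(int fd)
{
	char buf[512];
	ssize_t n;

	shutdown(fd, SHUT_WR);		/* send our FIN, keep receiving */
	do {
		n = read(fd, buf, sizeof(buf));
	} while (n > 0);		/* 0 means the peer's FIN arrived */
	close(fd);
}
#endif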
2119
2120 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2121 struct socket *sock)
2122 {
2123 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2124 unsigned int keepidle;
2125 unsigned int keepcnt;
2126 unsigned int timeo;
2127
2128 spin_lock(&xprt->transport_lock);
2129 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2130 keepcnt = xprt->timeout->to_retries + 1;
2131 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2132 (xprt->timeout->to_retries + 1);
2133 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2134 spin_unlock(&xprt->transport_lock);
2135
2136 /* TCP Keepalive options */
2137 sock_set_keepalive(sock->sk);
2138 tcp_sock_set_keepidle(sock->sk, keepidle);
2139 tcp_sock_set_keepintvl(sock->sk, keepidle);
2140 tcp_sock_set_keepcnt(sock->sk, keepcnt);
2141
2142 /* TCP user timeout (see RFC5482) */
2143 tcp_sock_set_user_timeout(sock->sk, timeo);
2144 }
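
/*
 * For reference, the kernel-internal helpers above correspond one for
 * one to the userspace setsockopt() interface. A sketch with arbitrary
 * example values (the real values are derived from xprt->timeout):
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void set_tcp_timeouts_example(int fd)
{
	int on = 1;
	int keepidle = 60;		/* seconds before the first probe */
	int keepintvl = 60;		/* seconds between probes */
	int keepcnt = 3;		/* failed probes before drop */
	unsigned int timeo = 180000;	/* TCP_USER_TIMEOUT, milliseconds */

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle, sizeof(keepidle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &keepintvl, sizeof(keepintvl));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &keepcnt, sizeof(keepcnt));
	/* RFC 5482: abort after this much unacknowledged transmit time */
	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeo, sizeof(timeo));
}
#endif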
2145
2146 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2147 unsigned long connect_timeout,
2148 unsigned long reconnect_timeout)
2149 {
2150 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2151 struct rpc_timeout to;
2152 unsigned long initval;
2153
2154 spin_lock(&xprt->transport_lock);
2155 if (reconnect_timeout < xprt->max_reconnect_timeout)
2156 xprt->max_reconnect_timeout = reconnect_timeout;
2157 if (connect_timeout < xprt->connect_timeout) {
2158 memcpy(&to, xprt->timeout, sizeof(to));
2159 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2160 /* Arbitrary lower limit */
2161 if (initval < XS_TCP_INIT_REEST_TO << 1)
2162 initval = XS_TCP_INIT_REEST_TO << 1;
2163 to.to_initval = initval;
2164 to.to_maxval = initval;
2165 memcpy(&transport->tcp_timeout, &to,
2166 sizeof(transport->tcp_timeout));
2167 xprt->timeout = &transport->tcp_timeout;
2168 xprt->connect_timeout = connect_timeout;
2169 }
2170 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2171 spin_unlock(&xprt->transport_lock);
2172 }
2173
2174 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2175 {
2176 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2177 int ret = -ENOTCONN;
2178
2179 if (!transport->inet) {
2180 struct sock *sk = sock->sk;
2181
2182 /* Avoid temporary addresses; they are bad for long-lived
2183 * connections such as NFS mounts.
2184 * RFC4941, section 3.6 suggests that:
2185 * Individual applications, which have specific
2186 * knowledge about the normal duration of connections,
2187 * MAY override this as appropriate.
2188 */
2189 if (xs_addr(xprt)->sa_family == PF_INET6) {
2190 ip6_sock_set_addr_preferences(sk,
2191 IPV6_PREFER_SRC_PUBLIC);
2192 }
2193
2194 xs_tcp_set_socket_timeouts(xprt, sock);
2195 tcp_sock_set_nodelay(sk);
2196
2197 write_lock_bh(&sk->sk_callback_lock);
2198
2199 xs_save_old_callbacks(transport, sk);
2200
2201 sk->sk_user_data = xprt;
2202 sk->sk_data_ready = xs_data_ready;
2203 sk->sk_state_change = xs_tcp_state_change;
2204 sk->sk_write_space = xs_tcp_write_space;
2205 sock_set_flag(sk, SOCK_FASYNC);
2206 sk->sk_error_report = xs_error_report;
2207
2208 /* socket options */
2209 sock_reset_flag(sk, SOCK_LINGER);
2210
2211 xprt_clear_connected(xprt);
2212
2213 /* Reset to new socket */
2214 transport->sock = sock;
2215 transport->inet = sk;
2216
2217 write_unlock_bh(&sk->sk_callback_lock);
2218 }
2219
2220 if (!xprt_bound(xprt))
2221 goto out;
2222
2223 xs_set_memalloc(xprt);
2224
2225 xs_stream_start_connect(transport);
2226
2227 /* Tell the socket layer to start connecting... */
2228 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2229 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2230 switch (ret) {
2231 case 0:
2232 xs_set_srcport(transport, sock);
2233 fallthrough;
2234 case -EINPROGRESS:
2235 /* SYN_SENT! */
2236 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2237 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2238 break;
2239 case -EADDRNOTAVAIL:
2240 /* Source port number is unavailable. Try a new one! */
2241 transport->srcport = 0;
2242 }
2243 out:
2244 return ret;
2245 }
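
/*
 * The O_NONBLOCK kernel_connect() above normally returns -EINPROGRESS,
 * and completion is reported asynchronously via the sk_state_change
 * callback. A userspace sketch of the same non-blocking connect
 * pattern, where poll() plays the role of the state-change callback:
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

static int nonblocking_connect_example(int fd, const struct sockaddr *sa,
				       socklen_t len)
{
	int err = 0;
	socklen_t errlen = sizeof(err);
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };

	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
	if (connect(fd, sa, len) == 0)
		return 0;			/* connected immediately */
	if (errno != EINPROGRESS)
		return -errno;			/* hard failure */
	poll(&pfd, 1, -1);			/* wait for writability */
	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &errlen);
	return -err;				/* 0 on success */
}
#endif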
2246
2247 /**
2248 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2249 * @work: queued work item
2250 *
2251 * Invoked from a workqueue context.
2252 */
2253 static void xs_tcp_setup_socket(struct work_struct *work)
2254 {
2255 struct sock_xprt *transport =
2256 container_of(work, struct sock_xprt, connect_worker.work);
2257 struct socket *sock = transport->sock;
2258 struct rpc_xprt *xprt = &transport->xprt;
2259 int status = -EIO;
2260
2261 if (!sock) {
2262 sock = xs_create_sock(xprt, transport,
2263 xs_addr(xprt)->sa_family, SOCK_STREAM,
2264 IPPROTO_TCP, true);
2265 if (IS_ERR(sock)) {
2266 status = PTR_ERR(sock);
2267 goto out;
2268 }
2269 }
2270
2271 dprintk("RPC: worker connecting xprt %p via %s to "
2272 "%s (port %s)\n", xprt,
2273 xprt->address_strings[RPC_DISPLAY_PROTO],
2274 xprt->address_strings[RPC_DISPLAY_ADDR],
2275 xprt->address_strings[RPC_DISPLAY_PORT]);
2276
2277 status = xs_tcp_finish_connecting(xprt, sock);
2278 trace_rpc_socket_connect(xprt, sock, status);
2279 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2280 xprt, -status, xprt_connected(xprt),
2281 sock->sk->sk_state);
2282 switch (status) {
2283 default:
2284 printk("%s: connect returned unhandled error %d\n",
2285 __func__, status);
2286 fallthrough;
2287 case -EADDRNOTAVAIL:
2288 /* We're probably in TIME_WAIT. Get rid of existing socket,
2289 * and retry
2290 */
2291 xs_tcp_force_close(xprt);
2292 break;
2293 case 0:
2294 case -EINPROGRESS:
2295 case -EALREADY:
2296 xprt_unlock_connect(xprt, transport);
2297 return;
2298 case -EINVAL:
2299 /* Happens, for instance, if the user specified a link
2300 * local IPv6 address without a scope-id.
2301 */
2302 case -ECONNREFUSED:
2303 case -ECONNRESET:
2304 case -ENETDOWN:
2305 case -ENETUNREACH:
2306 case -EHOSTUNREACH:
2307 case -EADDRINUSE:
2308 case -ENOBUFS:
2309 /* xs_tcp_force_close() wakes tasks with a fixed error code.
2310 * We need to wake them first to ensure the correct error code.
2311 */
2312 xprt_wake_pending_tasks(xprt, status);
2313 xs_tcp_force_close(xprt);
2314 goto out;
2315 }
2316 status = -EAGAIN;
2317 out:
2318 xprt_clear_connecting(xprt);
2319 xprt_unlock_connect(xprt, transport);
2320 xprt_wake_pending_tasks(xprt, status);
2321 }
2322
2323 /**
2324 * xs_connect - connect a socket to a remote endpoint
2325 * @xprt: pointer to transport structure
2326 * @task: address of RPC task that manages state of connect request
2327 *
2328 * TCP: If the remote end dropped the connection, delay reconnecting.
2329 *
2330 * UDP socket connects are synchronous, but we use a work queue anyway
2331 * to guarantee that even unprivileged user processes can set up a
2332 * socket on a privileged port.
2333 *
2334 * If a UDP socket connect fails, the delay behavior here prevents
2335 * retry floods (hard mounts).
2336 */
2337 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2338 {
2339 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2340 unsigned long delay = 0;
2341
2342 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2343
2344 if (transport->sock != NULL) {
2345 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2346 "seconds\n",
2347 xprt, xprt->reestablish_timeout / HZ);
2348
2349 /* Start by resetting any existing state */
2350 xs_reset_transport(transport);
2351
2352 delay = xprt_reconnect_delay(xprt);
2353 xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2354
2355 } else
2356 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2357
2358 queue_delayed_work(xprtiod_workqueue,
2359 &transport->connect_worker,
2360 delay);
2361 }
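
/*
 * The reconnect delay above grows exponentially between attempts. A
 * distilled sketch of that backoff shape; the names and the helper are
 * illustrative, not the actual xprt_reconnect_delay()/
 * xprt_reconnect_backoff() implementations:
 */
#if 0
static unsigned long backoff_example(unsigned long *reest_to,
				     unsigned long init_to,
				     unsigned long max_to)
{
	unsigned long delay = *reest_to;

	*reest_to <<= 1;		/* double for the next attempt */
	if (*reest_to > max_to)
		*reest_to = max_to;	/* clamp to the ceiling */
	if (*reest_to < init_to)
		*reest_to = init_to;	/* and to the floor */
	return delay;
}
#endif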
2362
2363 static void xs_wake_disconnect(struct sock_xprt *transport)
2364 {
2365 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
2366 xs_tcp_force_close(&transport->xprt);
2367 }
2368
2369 static void xs_wake_write(struct sock_xprt *transport)
2370 {
2371 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
2372 xprt_write_space(&transport->xprt);
2373 }
2374
2375 static void xs_wake_error(struct sock_xprt *transport)
2376 {
2377 int sockerr;
2378
2379 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2380 return;
2381 mutex_lock(&transport->recv_mutex);
2382 if (transport->sock == NULL)
2383 goto out;
2384 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2385 goto out;
2386 sockerr = xchg(&transport->xprt_err, 0);
2387 if (sockerr < 0)
2388 xprt_wake_pending_tasks(&transport->xprt, sockerr);
2389 out:
2390 mutex_unlock(&transport->recv_mutex);
2391 }
2392
2393 static void xs_wake_pending(struct sock_xprt *transport)
2394 {
2395 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
2396 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
2397 }
2398
2399 static void xs_error_handle(struct work_struct *work)
2400 {
2401 struct sock_xprt *transport = container_of(work,
2402 struct sock_xprt, error_worker);
2403
2404 xs_wake_disconnect(transport);
2405 xs_wake_write(transport);
2406 xs_wake_error(transport);
2407 xs_wake_pending(transport);
2408 }
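
/*
 * The xs_wake_*() helpers above implement a standard deferral pattern:
 * socket callbacks run in atomic context, so they only set a bit and
 * queue work, and the handling happens later where sleeping is safe.
 * Distilled sketch, with a hypothetical MY_EVENT bit and handler:
 */
#if 0
static void socket_callback(struct sock_xprt *transport)
{
	/* atomic context: flag the event, defer the real work */
	set_bit(MY_EVENT, &transport->sock_state);
	queue_work(xprtiod_workqueue, &transport->error_worker);
}

static void my_event_worker(struct sock_xprt *transport)
{
	/* process context: consume the flag, sleeping is allowed here */
	if (test_and_clear_bit(MY_EVENT, &transport->sock_state))
		handle_event(transport);	/* hypothetical handler */
}
#endif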
2409
2410 /**
2411 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2412 * @xprt: rpc_xprt struct containing statistics
2413 * @seq: output file
2414 *
2415 */
2416 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2417 {
2418 long idle_time = 0;
2419
2420 if (xprt_connected(xprt))
2421 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2422
2423 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2424 "%llu %llu %lu %llu %llu\n",
2425 xprt->stat.bind_count,
2426 xprt->stat.connect_count,
2427 xprt->stat.connect_time / HZ,
2428 idle_time,
2429 xprt->stat.sends,
2430 xprt->stat.recvs,
2431 xprt->stat.bad_xids,
2432 xprt->stat.req_u,
2433 xprt->stat.bklog_u,
2434 xprt->stat.max_slots,
2435 xprt->stat.sending_u,
2436 xprt->stat.pending_u);
2437 }
2438
2439 /**
2440 * xs_udp_print_stats - display UDP socket-specific stats
2441 * @xprt: rpc_xprt struct containing statistics
2442 * @seq: output file
2443 *
2444 */
2445 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2446 {
2447 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2448
2449 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2450 "%lu %llu %llu\n",
2451 transport->srcport,
2452 xprt->stat.bind_count,
2453 xprt->stat.sends,
2454 xprt->stat.recvs,
2455 xprt->stat.bad_xids,
2456 xprt->stat.req_u,
2457 xprt->stat.bklog_u,
2458 xprt->stat.max_slots,
2459 xprt->stat.sending_u,
2460 xprt->stat.pending_u);
2461 }
2462
2463 /**
2464 * xs_tcp_print_stats - display TCP socket-specific stats
2465 * @xprt: rpc_xprt struct containing statistics
2466 * @seq: output file
2467 *
2468 */
2469 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2470 {
2471 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2472 long idle_time = 0;
2473
2474 if (xprt_connected(xprt))
2475 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2476
2477 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2478 "%llu %llu %lu %llu %llu\n",
2479 transport->srcport,
2480 xprt->stat.bind_count,
2481 xprt->stat.connect_count,
2482 xprt->stat.connect_time / HZ,
2483 idle_time,
2484 xprt->stat.sends,
2485 xprt->stat.recvs,
2486 xprt->stat.bad_xids,
2487 xprt->stat.req_u,
2488 xprt->stat.bklog_u,
2489 xprt->stat.max_slots,
2490 xprt->stat.sending_u,
2491 xprt->stat.pending_u);
2492 }
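
/*
 * Reading aid: the three printers above feed the per-mount "xprt:" line
 * in /proc/self/mountstats. For the tcp variant, the fields appear in
 * the order of the format string:
 *
 *	xprt: tcp <srcport> <bind_count> <connect_count> <connect_time>
 *	      <idle_time> <sends> <recvs> <bad_xids> <req_u> <bklog_u>
 *	      <max_slots> <sending_u> <pending_u>
 */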
2493
2494 /*
2495 * Allocate a page-backed scratch buffer for the RPC code. We allocate
2496 * pages instead of using kmalloc (as rpc_malloc does) because we want
2497 * to use the server-side send routines.
2498 */
2499 static int bc_malloc(struct rpc_task *task)
2500 {
2501 struct rpc_rqst *rqst = task->tk_rqstp;
2502 size_t size = rqst->rq_callsize;
2503 struct page *page;
2504 struct rpc_buffer *buf;
2505
2506 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2507 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2508 size);
2509 return -EINVAL;
2510 }
2511
2512 page = alloc_page(GFP_KERNEL);
2513 if (!page)
2514 return -ENOMEM;
2515
2516 buf = page_address(page);
2517 buf->len = PAGE_SIZE;
2518
2519 rqst->rq_buffer = buf->data;
2520 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2521 return 0;
2522 }
2523
2524 /*
2525 * Free the space allocated in the bc_alloc routine
2526 */
2527 static void bc_free(struct rpc_task *task)
2528 {
2529 void *buffer = task->tk_rqstp->rq_buffer;
2530 struct rpc_buffer *buf;
2531
2532 buf = container_of(buffer, struct rpc_buffer, data);
2533 free_page((unsigned long)buf);
2534 }
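
/*
 * bc_malloc()/bc_free() rely on struct rpc_buffer placing its data area
 * immediately after its header, so the data pointer handed to the RPC
 * code can be mapped back to the containing structure. A distilled
 * sketch of that container_of() round trip, with illustrative types:
 */
#if 0
struct hdr_buf {
	size_t len;
	char data[];		/* callers only ever see this pointer */
};

static struct hdr_buf *data_to_buf(void *data)
{
	return container_of(data, struct hdr_buf, data);
}
#endif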
2535
2536 static int bc_sendto(struct rpc_rqst *req)
2537 {
2538 struct xdr_buf *xdr = &req->rq_snd_buf;
2539 struct sock_xprt *transport =
2540 container_of(req->rq_xprt, struct sock_xprt, xprt);
2541 struct msghdr msg = {
2542 .msg_flags = 0,
2543 };
2544 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
2545 (u32)xdr->len);
2546 unsigned int sent = 0;
2547 int err;
2548
2549 req->rq_xtime = ktime_get();
2550 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
2551 xdr_free_bvec(xdr);
2552 if (err < 0 || sent != (xdr->len + sizeof(marker)))
2553 return -EAGAIN;
2554 return sent;
2555 }
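
/*
 * The marker built in bc_sendto() is the 4-byte RPC-over-TCP record
 * mark (RFC 5531, section 11): the high bit flags the last fragment and
 * the low 31 bits carry the fragment length. A sketch of the encoding
 * and its inverse:
 */
#if 0
static rpc_fraghdr encode_marker(u32 len, bool last)
{
	return cpu_to_be32((last ? RPC_LAST_STREAM_FRAGMENT : 0) | len);
}

static void decode_marker(rpc_fraghdr marker, u32 *len, bool *last)
{
	u32 host = be32_to_cpu(marker);

	*last = !!(host & RPC_LAST_STREAM_FRAGMENT);
	*len = host & ~RPC_LAST_STREAM_FRAGMENT;
}
#endif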
2556
2557 /**
2558 * bc_send_request - Send a backchannel Call on a TCP socket
2559 * @req: rpc_rqst containing Call message to be sent
2560 *
2561 * xpt_mutex ensures @req's whole message is written to the socket
2562 * without interruption.
2563 *
2564 * Return values:
2565 * %0 if the message was sent successfully
2566 * %-ENOTCONN if the message was not sent
2567 */
2568 static int bc_send_request(struct rpc_rqst *req)
2569 {
2570 struct svc_xprt *xprt;
2571 int len;
2572
2573 /*
2574 * Get the server socket associated with this callback xprt
2575 */
2576 xprt = req->rq_xprt->bc_xprt;
2577
2578 /*
2579 * Grab the mutex to serialize data as the connection is shared
2580 * with the fore channel
2581 */
2582 mutex_lock(&xprt->xpt_mutex);
2583 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2584 len = -ENOTCONN;
2585 else
2586 len = bc_sendto(req);
2587 mutex_unlock(&xprt->xpt_mutex);
2588
2589 if (len > 0)
2590 len = 0;
2591
2592 return len;
2593 }
2594
2595 /*
2596 * The close routine. Since this is client-initiated, we do nothing.
2597 */
2598
2599 static void bc_close(struct rpc_xprt *xprt)
2600 {
2601 xprt_disconnect_done(xprt);
2602 }
2603
2604 /*
2605 * The xprt destroy routine. Again, because this connection is
2606 * client-initiated, we do nothing.
2607 */
2608
2609 static void bc_destroy(struct rpc_xprt *xprt)
2610 {
2611 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2612
2613 xs_xprt_free(xprt);
2614 module_put(THIS_MODULE);
2615 }
2616
2617 static const struct rpc_xprt_ops xs_local_ops = {
2618 .reserve_xprt = xprt_reserve_xprt,
2619 .release_xprt = xprt_release_xprt,
2620 .alloc_slot = xprt_alloc_slot,
2621 .free_slot = xprt_free_slot,
2622 .rpcbind = xs_local_rpcbind,
2623 .set_port = xs_local_set_port,
2624 .connect = xs_local_connect,
2625 .buf_alloc = rpc_malloc,
2626 .buf_free = rpc_free,
2627 .prepare_request = xs_stream_prepare_request,
2628 .send_request = xs_local_send_request,
2629 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2630 .close = xs_close,
2631 .destroy = xs_destroy,
2632 .print_stats = xs_local_print_stats,
2633 .enable_swap = xs_enable_swap,
2634 .disable_swap = xs_disable_swap,
2635 };
2636
2637 static const struct rpc_xprt_ops xs_udp_ops = {
2638 .set_buffer_size = xs_udp_set_buffer_size,
2639 .reserve_xprt = xprt_reserve_xprt_cong,
2640 .release_xprt = xprt_release_xprt_cong,
2641 .alloc_slot = xprt_alloc_slot,
2642 .free_slot = xprt_free_slot,
2643 .rpcbind = rpcb_getport_async,
2644 .set_port = xs_set_port,
2645 .connect = xs_connect,
2646 .buf_alloc = rpc_malloc,
2647 .buf_free = rpc_free,
2648 .send_request = xs_udp_send_request,
2649 .wait_for_reply_request = xprt_wait_for_reply_request_rtt,
2650 .timer = xs_udp_timer,
2651 .release_request = xprt_release_rqst_cong,
2652 .close = xs_close,
2653 .destroy = xs_destroy,
2654 .print_stats = xs_udp_print_stats,
2655 .enable_swap = xs_enable_swap,
2656 .disable_swap = xs_disable_swap,
2657 .inject_disconnect = xs_inject_disconnect,
2658 };
2659
2660 static const struct rpc_xprt_ops xs_tcp_ops = {
2661 .reserve_xprt = xprt_reserve_xprt,
2662 .release_xprt = xprt_release_xprt,
2663 .alloc_slot = xprt_alloc_slot,
2664 .free_slot = xprt_free_slot,
2665 .rpcbind = rpcb_getport_async,
2666 .set_port = xs_set_port,
2667 .connect = xs_connect,
2668 .buf_alloc = rpc_malloc,
2669 .buf_free = rpc_free,
2670 .prepare_request = xs_stream_prepare_request,
2671 .send_request = xs_tcp_send_request,
2672 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2673 .close = xs_tcp_shutdown,
2674 .destroy = xs_destroy,
2675 .set_connect_timeout = xs_tcp_set_connect_timeout,
2676 .print_stats = xs_tcp_print_stats,
2677 .enable_swap = xs_enable_swap,
2678 .disable_swap = xs_disable_swap,
2679 .inject_disconnect = xs_inject_disconnect,
2680 #ifdef CONFIG_SUNRPC_BACKCHANNEL
2681 .bc_setup = xprt_setup_bc,
2682 .bc_maxpayload = xs_tcp_bc_maxpayload,
2683 .bc_num_slots = xprt_bc_max_slots,
2684 .bc_free_rqst = xprt_free_bc_rqst,
2685 .bc_destroy = xprt_destroy_bc,
2686 #endif
2687 };
2688
2689 /*
2690 * The rpc_xprt_ops for the server backchannel
2691 */
2692
2693 static const struct rpc_xprt_ops bc_tcp_ops = {
2694 .reserve_xprt = xprt_reserve_xprt,
2695 .release_xprt = xprt_release_xprt,
2696 .alloc_slot = xprt_alloc_slot,
2697 .free_slot = xprt_free_slot,
2698 .buf_alloc = bc_malloc,
2699 .buf_free = bc_free,
2700 .send_request = bc_send_request,
2701 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2702 .close = bc_close,
2703 .destroy = bc_destroy,
2704 .print_stats = xs_tcp_print_stats,
2705 .enable_swap = xs_enable_swap,
2706 .disable_swap = xs_disable_swap,
2707 .inject_disconnect = xs_inject_disconnect,
2708 };
2709
2710 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2711 {
2712 static const struct sockaddr_in sin = {
2713 .sin_family = AF_INET,
2714 .sin_addr.s_addr = htonl(INADDR_ANY),
2715 };
2716 static const struct sockaddr_in6 sin6 = {
2717 .sin6_family = AF_INET6,
2718 .sin6_addr = IN6ADDR_ANY_INIT,
2719 };
2720
2721 switch (family) {
2722 case AF_LOCAL:
2723 break;
2724 case AF_INET:
2725 memcpy(sap, &sin, sizeof(sin));
2726 break;
2727 case AF_INET6:
2728 memcpy(sap, &sin6, sizeof(sin6));
2729 break;
2730 default:
2731 dprintk("RPC: %s: Bad address family\n", __func__);
2732 return -EAFNOSUPPORT;
2733 }
2734 return 0;
2735 }
2736
2737 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2738 unsigned int slot_table_size,
2739 unsigned int max_slot_table_size)
2740 {
2741 struct rpc_xprt *xprt;
2742 struct sock_xprt *new;
2743
2744 if (args->addrlen > sizeof(xprt->addr)) {
2745 dprintk("RPC: xs_setup_xprt: address too large\n");
2746 return ERR_PTR(-EBADF);
2747 }
2748
2749 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2750 max_slot_table_size);
2751 if (xprt == NULL) {
2752 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2753 "rpc_xprt\n");
2754 return ERR_PTR(-ENOMEM);
2755 }
2756
2757 new = container_of(xprt, struct sock_xprt, xprt);
2758 mutex_init(&new->recv_mutex);
2759 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2760 xprt->addrlen = args->addrlen;
2761 if (args->srcaddr)
2762 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2763 else {
2764 int err;
2765 err = xs_init_anyaddr(args->dstaddr->sa_family,
2766 (struct sockaddr *)&new->srcaddr);
2767 if (err != 0) {
2768 xprt_free(xprt);
2769 return ERR_PTR(err);
2770 }
2771 }
2772
2773 return xprt;
2774 }
2775
2776 static const struct rpc_timeout xs_local_default_timeout = {
2777 .to_initval = 10 * HZ,
2778 .to_maxval = 10 * HZ,
2779 .to_retries = 2,
2780 };
2781
2782 /**
2783 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2784 * @args: rpc transport creation arguments
2785 *
2786 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2787 */
2788 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2789 {
2790 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2791 struct sock_xprt *transport;
2792 struct rpc_xprt *xprt;
2793 struct rpc_xprt *ret;
2794
2795 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2796 xprt_max_tcp_slot_table_entries);
2797 if (IS_ERR(xprt))
2798 return xprt;
2799 transport = container_of(xprt, struct sock_xprt, xprt);
2800
2801 xprt->prot = 0;
2802 xprt->xprt_class = &xs_local_transport;
2803 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2804
2805 xprt->bind_timeout = XS_BIND_TO;
2806 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2807 xprt->idle_timeout = XS_IDLE_DISC_TO;
2808
2809 xprt->ops = &xs_local_ops;
2810 xprt->timeout = &xs_local_default_timeout;
2811
2812 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2813 INIT_WORK(&transport->error_worker, xs_error_handle);
2814 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2815
2816 switch (sun->sun_family) {
2817 case AF_LOCAL:
2818 if (sun->sun_path[0] != '/') {
2819 dprintk("RPC: bad AF_LOCAL address: %s\n",
2820 sun->sun_path);
2821 ret = ERR_PTR(-EINVAL);
2822 goto out_err;
2823 }
2824 xprt_set_bound(xprt);
2825 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2826 ret = ERR_PTR(xs_local_setup_socket(transport));
2827 if (ret)
2828 goto out_err;
2829 break;
2830 default:
2831 ret = ERR_PTR(-EAFNOSUPPORT);
2832 goto out_err;
2833 }
2834
2835 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2836 xprt->address_strings[RPC_DISPLAY_ADDR]);
2837
2838 if (try_module_get(THIS_MODULE))
2839 return xprt;
2840 ret = ERR_PTR(-EINVAL);
2841 out_err:
2842 xs_xprt_free(xprt);
2843 return ret;
2844 }
2845
2846 static const struct rpc_timeout xs_udp_default_timeout = {
2847 .to_initval = 5 * HZ,
2848 .to_maxval = 30 * HZ,
2849 .to_increment = 5 * HZ,
2850 .to_retries = 5,
2851 };
2852
2853 /**
2854 * xs_setup_udp - Set up transport to use a UDP socket
2855 * @args: rpc transport creation arguments
2856 *
2857 */
2858 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2859 {
2860 struct sockaddr *addr = args->dstaddr;
2861 struct rpc_xprt *xprt;
2862 struct sock_xprt *transport;
2863 struct rpc_xprt *ret;
2864
2865 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2866 xprt_udp_slot_table_entries);
2867 if (IS_ERR(xprt))
2868 return xprt;
2869 transport = container_of(xprt, struct sock_xprt, xprt);
2870
2871 xprt->prot = IPPROTO_UDP;
2872 xprt->xprt_class = &xs_udp_transport;
2873 /* XXX: header size can vary due to auth type, IPv6, etc. */
2874 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2875
2876 xprt->bind_timeout = XS_BIND_TO;
2877 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2878 xprt->idle_timeout = XS_IDLE_DISC_TO;
2879
2880 xprt->ops = &xs_udp_ops;
2881
2882 xprt->timeout = &xs_udp_default_timeout;
2883
2884 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2885 INIT_WORK(&transport->error_worker, xs_error_handle);
2886 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2887
2888 switch (addr->sa_family) {
2889 case AF_INET:
2890 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2891 xprt_set_bound(xprt);
2892
2893 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2894 break;
2895 case AF_INET6:
2896 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2897 xprt_set_bound(xprt);
2898
2899 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2900 break;
2901 default:
2902 ret = ERR_PTR(-EAFNOSUPPORT);
2903 goto out_err;
2904 }
2905
2906 if (xprt_bound(xprt))
2907 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2908 xprt->address_strings[RPC_DISPLAY_ADDR],
2909 xprt->address_strings[RPC_DISPLAY_PORT],
2910 xprt->address_strings[RPC_DISPLAY_PROTO]);
2911 else
2912 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2913 xprt->address_strings[RPC_DISPLAY_ADDR],
2914 xprt->address_strings[RPC_DISPLAY_PROTO]);
2915
2916 if (try_module_get(THIS_MODULE))
2917 return xprt;
2918 ret = ERR_PTR(-EINVAL);
2919 out_err:
2920 xs_xprt_free(xprt);
2921 return ret;
2922 }
2923
2924 static const struct rpc_timeout xs_tcp_default_timeout = {
2925 .to_initval = 60 * HZ,
2926 .to_maxval = 60 * HZ,
2927 .to_retries = 2,
2928 };
2929
2930 /**
2931 * xs_setup_tcp - Set up transport to use a TCP socket
2932 * @args: rpc transport creation arguments
2933 *
2934 */
2935 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2936 {
2937 struct sockaddr *addr = args->dstaddr;
2938 struct rpc_xprt *xprt;
2939 struct sock_xprt *transport;
2940 struct rpc_xprt *ret;
2941 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2942
2943 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2944 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2945
2946 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2947 max_slot_table_size);
2948 if (IS_ERR(xprt))
2949 return xprt;
2950 transport = container_of(xprt, struct sock_xprt, xprt);
2951
2952 xprt->prot = IPPROTO_TCP;
2953 xprt->xprt_class = &xs_tcp_transport;
2954 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2955
2956 xprt->bind_timeout = XS_BIND_TO;
2957 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2958 xprt->idle_timeout = XS_IDLE_DISC_TO;
2959
2960 xprt->ops = &xs_tcp_ops;
2961 xprt->timeout = &xs_tcp_default_timeout;
2962
2963 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
2964 xprt->connect_timeout = xprt->timeout->to_initval *
2965 (xprt->timeout->to_retries + 1);
2966
2967 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2968 INIT_WORK(&transport->error_worker, xs_error_handle);
2969 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2970
2971 switch (addr->sa_family) {
2972 case AF_INET:
2973 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2974 xprt_set_bound(xprt);
2975
2976 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2977 break;
2978 case AF_INET6:
2979 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2980 xprt_set_bound(xprt);
2981
2982 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2983 break;
2984 default:
2985 ret = ERR_PTR(-EAFNOSUPPORT);
2986 goto out_err;
2987 }
2988
2989 if (xprt_bound(xprt))
2990 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2991 xprt->address_strings[RPC_DISPLAY_ADDR],
2992 xprt->address_strings[RPC_DISPLAY_PORT],
2993 xprt->address_strings[RPC_DISPLAY_PROTO]);
2994 else
2995 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2996 xprt->address_strings[RPC_DISPLAY_ADDR],
2997 xprt->address_strings[RPC_DISPLAY_PROTO]);
2998
2999 if (try_module_get(THIS_MODULE))
3000 return xprt;
3001 ret = ERR_PTR(-EINVAL);
3002 out_err:
3003 xs_xprt_free(xprt);
3004 return ret;
3005 }
3006
3007 /**
3008 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3009 * @args: rpc transport creation arguments
3010 *
3011 */
3012 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3013 {
3014 struct sockaddr *addr = args->dstaddr;
3015 struct rpc_xprt *xprt;
3016 struct sock_xprt *transport;
3017 struct svc_sock *bc_sock;
3018 struct rpc_xprt *ret;
3019
3020 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3021 xprt_tcp_slot_table_entries);
3022 if (IS_ERR(xprt))
3023 return xprt;
3024 transport = container_of(xprt, struct sock_xprt, xprt);
3025
3026 xprt->prot = IPPROTO_TCP;
3027 xprt->xprt_class = &xs_bc_tcp_transport;
3028 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3029 xprt->timeout = &xs_tcp_default_timeout;
3030
3031 /* backchannel */
3032 xprt_set_bound(xprt);
3033 xprt->bind_timeout = 0;
3034 xprt->reestablish_timeout = 0;
3035 xprt->idle_timeout = 0;
3036
3037 xprt->ops = &bc_tcp_ops;
3038
3039 switch (addr->sa_family) {
3040 case AF_INET:
3041 xs_format_peer_addresses(xprt, "tcp",
3042 RPCBIND_NETID_TCP);
3043 break;
3044 case AF_INET6:
3045 xs_format_peer_addresses(xprt, "tcp",
3046 RPCBIND_NETID_TCP6);
3047 break;
3048 default:
3049 ret = ERR_PTR(-EAFNOSUPPORT);
3050 goto out_err;
3051 }
3052
3053 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3054 xprt->address_strings[RPC_DISPLAY_ADDR],
3055 xprt->address_strings[RPC_DISPLAY_PORT],
3056 xprt->address_strings[RPC_DISPLAY_PROTO]);
3057
3058 /*
3059 * Once we've associated a backchannel xprt with a connection,
3060 * we want to keep it around as long as the connection lasts,
3061 * in case we need to start using it for a backchannel again;
3062 * this reference won't be dropped until bc_xprt is destroyed.
3063 */
3064 xprt_get(xprt);
3065 args->bc_xprt->xpt_bc_xprt = xprt;
3066 xprt->bc_xprt = args->bc_xprt;
3067 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3068 transport->sock = bc_sock->sk_sock;
3069 transport->inet = bc_sock->sk_sk;
3070
3071 /*
3072 * Since we don't want connections for the backchannel, we set
3073 * the xprt status to connected
3074 */
3075 xprt_set_connected(xprt);
3076
3077 if (try_module_get(THIS_MODULE))
3078 return xprt;
3079
3080 args->bc_xprt->xpt_bc_xprt = NULL;
3081 args->bc_xprt->xpt_bc_xps = NULL;
3082 xprt_put(xprt);
3083 ret = ERR_PTR(-EINVAL);
3084 out_err:
3085 xs_xprt_free(xprt);
3086 return ret;
3087 }
3088
3089 static struct xprt_class xs_local_transport = {
3090 .list = LIST_HEAD_INIT(xs_local_transport.list),
3091 .name = "named UNIX socket",
3092 .owner = THIS_MODULE,
3093 .ident = XPRT_TRANSPORT_LOCAL,
3094 .setup = xs_setup_local,
3095 .netid = { "" },
3096 };
3097
3098 static struct xprt_class xs_udp_transport = {
3099 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3100 .name = "udp",
3101 .owner = THIS_MODULE,
3102 .ident = XPRT_TRANSPORT_UDP,
3103 .setup = xs_setup_udp,
3104 .netid = { "udp", "udp6", "" },
3105 };
3106
3107 static struct xprt_class xs_tcp_transport = {
3108 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3109 .name = "tcp",
3110 .owner = THIS_MODULE,
3111 .ident = XPRT_TRANSPORT_TCP,
3112 .setup = xs_setup_tcp,
3113 .netid = { "tcp", "tcp6", "" },
3114 };
3115
3116 static struct xprt_class xs_bc_tcp_transport = {
3117 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3118 .name = "tcp NFSv4.1 backchannel",
3119 .owner = THIS_MODULE,
3120 .ident = XPRT_TRANSPORT_BC_TCP,
3121 .setup = xs_setup_bc_tcp,
3122 .netid = { "" },
3123 };
3124
3125 /**
3126 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3127 *
3128 */
3129 int init_socket_xprt(void)
3130 {
3131 if (!sunrpc_table_header)
3132 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3133
3134 xprt_register_transport(&xs_local_transport);
3135 xprt_register_transport(&xs_udp_transport);
3136 xprt_register_transport(&xs_tcp_transport);
3137 xprt_register_transport(&xs_bc_tcp_transport);
3138
3139 return 0;
3140 }
3141
3142 /**
3143 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3144 *
3145 */
3146 void cleanup_socket_xprt(void)
3147 {
3148 if (sunrpc_table_header) {
3149 unregister_sysctl_table(sunrpc_table_header);
3150 sunrpc_table_header = NULL;
3151 }
3152
3153 xprt_unregister_transport(&xs_local_transport);
3154 xprt_unregister_transport(&xs_udp_transport);
3155 xprt_unregister_transport(&xs_tcp_transport);
3156 xprt_unregister_transport(&xs_bc_tcp_transport);
3157 }
3158
3159 static int param_set_portnr(const char *val, const struct kernel_param *kp)
3160 {
3161 return param_set_uint_minmax(val, kp,
3162 RPC_MIN_RESVPORT,
3163 RPC_MAX_RESVPORT);
3164 }
3165
3166 static const struct kernel_param_ops param_ops_portnr = {
3167 .set = param_set_portnr,
3168 .get = param_get_uint,
3169 };
3170
3171 #define param_check_portnr(name, p) \
3172 __param_check(name, p, unsigned int);
3173
3174 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3175 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3176
3177 static int param_set_slot_table_size(const char *val,
3178 const struct kernel_param *kp)
3179 {
3180 return param_set_uint_minmax(val, kp,
3181 RPC_MIN_SLOT_TABLE,
3182 RPC_MAX_SLOT_TABLE);
3183 }
3184
3185 static const struct kernel_param_ops param_ops_slot_table_size = {
3186 .set = param_set_slot_table_size,
3187 .get = param_get_uint,
3188 };
3189
3190 #define param_check_slot_table_size(name, p) \
3191 __param_check(name, p, unsigned int);
3192
3193 static int param_set_max_slot_table_size(const char *val,
3194 const struct kernel_param *kp)
3195 {
3196 return param_set_uint_minmax(val, kp,
3197 RPC_MIN_SLOT_TABLE,
3198 RPC_MAX_SLOT_TABLE_LIMIT);
3199 }
3200
3201 static const struct kernel_param_ops param_ops_max_slot_table_size = {
3202 .set = param_set_max_slot_table_size,
3203 .get = param_get_uint,
3204 };
3205
3206 #define param_check_max_slot_table_size(name, p) \
3207 __param_check(name, p, unsigned int);
3208
3209 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3210 slot_table_size, 0644);
3211 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3212 max_slot_table_size, 0644);
3213 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3214 slot_table_size, 0644);
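
/*
 * These module parameters surface under /sys/module/sunrpc/parameters/
 * (xprtsock.c is built into sunrpc.ko) and can also be given at load
 * time, e.g.:
 *
 *	modprobe sunrpc tcp_slot_table_entries=16
 *	echo 128 > /sys/module/sunrpc/parameters/tcp_max_slot_table_entries
 *
 * Values outside the bounds enforced by the param_set_*() helpers above
 * are rejected by param_set_uint_minmax().
 */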