/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * xprtsock tunables
 */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY	(10U)

/*
 * Timeout for an RPC UDP socket connect.  UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
 */
#define XS_UDP_CONN_TO		(5U * HZ)

/*
 * Wait duration for an RPC TCP connection to be established.  Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
 */
#define XS_TCP_CONN_TO		(60U * HZ)

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)

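/*
 * Editor's note (illustrative, not part of the original file): the
 * reconnect path in xs_connect() below implements this backoff by
 * doubling the reestablish timeout and clamping it at the maximum:
 *
 *	xprt->reestablish_timeout <<= 1;
 *	if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
 *		xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
 *
 * so successive delays run 3s, 6s, 12s, ..., capped at five minutes.
 */
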
/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also time out UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->head[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	if (xdr->len > len)
		msg.msg_flags |= MSG_MORE;

	if (likely(iov.iov_len))
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->tail[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	if (unlikely(!sock))
		return -ENOTCONN;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		err = xs_send_head(sock, addr, addrlen, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != (len - base))
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = XS_SENDMSG_FLAGS;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		err = xs_send_tail(sock, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
		/* Protect against races with write_space */
		spin_lock_bh(&xprt->transport_lock);

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
			xprt_wait_for_buffer_space(task);

		spin_unlock_bh(&xprt->transport_lock);
	} else
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	req->rq_xtime = jiffies;
	status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
				sizeof(xprt->addr), xdr, req->rq_bytes_sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (likely(status >= (int) req->rq_slen))
		return 0;

	/* Still some bytes left; set up for a retry later. */
	if (status > 0)
		status = -EAGAIN;

	switch (status) {
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	case -EAGAIN:
		xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		break;
	}

	return status;
}

static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;

	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

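/*
 * Editor's note (illustrative, not part of the original file): RPC over
 * TCP frames each message with a 4-byte record marker (RFC 1831 record
 * marking), sent in network byte order.  The most significant bit flags
 * the last fragment of a message and the low 31 bits give the fragment
 * length.  For example, a 100-byte final fragment is encoded as:
 *
 *	u32 marker = htonl(RPC_LAST_STREAM_FRAGMENT | 100);
 *	(wire bytes: 0x80 0x00 0x00 0x64)
 */
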
/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status, retry = 0;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendpages(xprt->sock, NULL, 0, xdr,
						req->rq_bytes_sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		status = -EAGAIN;
		if (retry++ > XS_SENDMSG_RETRY)
			break;
	}

	switch (status) {
	case -EAGAIN:
		xs_nospace(task);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		status = -ENOTCONN;
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		xprt_disconnect(xprt);
		break;
	}

	return status;
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct socket *sock = xprt->sock;
	struct sock *sk = xprt->inet;

	if (!sk)
		return;

	dprintk("RPC: xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	xprt->inet = NULL;
	xprt->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_delayed_work(&xprt->connect_worker);
	flush_scheduled_work();

	xprt_disconnect(xprt);
	xs_close(xprt);
	kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid, *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock(&sk->sk_callback_lock);
}

static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
		dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
				len, desc->count);
		return 0;
	}
	desc->offset += len;
	desc->count -= len;
	dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
			len, desc->count);
	return len;
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;

	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(xprt->tcp_reclen < 4)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			xprt->tcp_reclen);
}

static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
	dprintk("RPC: reading reply for XID %08x\n",
			ntohl(xprt->tcp_xid));
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
	if (!req) {
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x request not found!\n",
				ntohl(xprt->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return;
	}

	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					&my_desc, xs_tcp_copy_data);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					desc, xs_tcp_copy_data);

	if (r > 0) {
		xprt->tcp_copied += r;
		xprt->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off XPRT_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(xprt->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
		goto out;
	}

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(xprt->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
	}

out:
	if (!(xprt->tcp_flags & XPRT_COPY_DATA))
		xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
	spin_unlock(&xprt->transport_lock);
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len;

	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	xprt->tcp_offset += len;
	dprintk("RPC: discarded %Zu bytes\n", len);
	xs_tcp_check_recm(xprt);
}

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	skb_reader_t desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
		.csum	= 0
	};

	dprintk("RPC: xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			xs_tcp_read_xid(xprt, &desc);
			continue;
		}
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
}

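/*
 * Editor's note (summary, not part of the original file): taken
 * together, the helpers above form a simple receive state machine.
 * Each pass over a TCP segment advances through the flags in order:
 *
 *	XPRT_COPY_RECM -> read the 4-byte record marker
 *	XPRT_COPY_XID  -> read the 4-byte XID that follows
 *	XPRT_COPY_DATA -> copy payload into the matching request
 *	(none set)     -> discard trailing bytes of the fragment
 *
 * xs_tcp_check_recm() resets the flags at each fragment boundary.
 */
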
/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
			xprt_wake_pending_tasks(xprt, 0);
		}
		spin_unlock_bh(&xprt->transport_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	default:
		xprt_disconnect(xprt);
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

out:
	read_unlock(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock *sk = xprt->inet;

	if (xprt->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
	}
	if (xprt->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	xprt->sndsize = 0;
	if (sndsize)
		xprt->sndsize = sndsize + 1024;
	xprt->rcvsize = 0;
	if (rcvsize)
		xprt->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}

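/*
 * Editor's note (worked example, not part of the original file, and
 * assuming the default slot table size of 16 entries): a request for a
 * 4096-byte send buffer is padded to 5120 bytes above, then scaled by
 * max_reqs * 2 in xs_udp_do_set_buffer_size():
 *
 *	sk_sndbuf = (4096 + 1024) * 16 * 2 = 163840 bytes
 */
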
/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
	xprt_adjust_cwnd(task, -ETIMEDOUT);
}

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	int err;
	unsigned short port = xprt->port;

	do {
		myaddr.sin_port = htons(port);
		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (err == 0) {
			xprt->port = port;
			dprintk("RPC: xs_bindresvport bound to port %u\n",
					port);
			return 0;
		}
		if (port <= xprt_min_resvport)
			port = xprt_max_resvport;
		else
			port--;
	} while (err == -EADDRINUSE && port != xprt->port);

	dprintk("RPC: can't bind to reserved port (%d).\n", -err);
	return err;
}

/**
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_udp_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
	struct socket *sock = xprt->sock;
	int err, status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt);

	/* Start by resetting any existing state */
	xs_close(xprt);

	if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
		dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
		goto out;
	}

	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
		sock_release(sock);
		goto out;
	}

	if (!xprt->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_no_check = UDP_CSUM_NORCV;
		sk->sk_allocation = GFP_ATOMIC;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		xprt->sock = sock;
		xprt->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_do_set_buffer_size(xprt);
	status = 0;
out:
	xprt_wake_pending_tasks(xprt, status);
	xprt_clear_connecting(xprt);
}

/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
{
	int result;
	struct socket *sock = xprt->sock;
	struct sockaddr any;

	dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);

	/*
	 * Disconnect the transport socket by doing a connect operation
	 * with AF_UNSPEC.  This should return immediately...
	 */
	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	result = sock->ops->connect(sock, &any, sizeof(any), 0);
	if (result)
		dprintk("RPC: AF_UNSPEC connect return code %d\n",
				result);
}

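/*
 * Editor's note (not part of the original file): connect() with
 * sa_family == AF_UNSPEC is the standard sockets idiom for dropping a
 * connection in place.  The kernel tears down the association and
 * returns the socket to a disconnected state, but the local port
 * binding survives (pinned with SOCK_BINDPORT_LOCK below), so the next
 * connect() goes out from the same source port and the server's
 * duplicate reply cache still matches.
 */
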
/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct socket *sock = xprt->sock;
	int err, status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt);

	if (!xprt->sock) {
		/* start from scratch */
		if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
			dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
			goto out;
		}

		if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
			sock_release(sock);
			goto out;
		}
	} else
		/* "close" the socket, preserving the local port */
		xs_tcp_reuse_connection(xprt);

	if (!xprt->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		sk->sk_allocation = GFP_ATOMIC;

		/* socket options */
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
		sock_reset_flag(sk, SOCK_LINGER);
		tcp_sk(sk)->linger2 = 0;
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		xprt->sock = sock;
		xprt->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	/* Tell the socket layer to start connecting... */
	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
		case -EINPROGRESS:
		case -EALREADY:
			goto out_clear;
		case -ECONNREFUSED:
		case -ECONNRESET:
			/* retry with existing socket, after a delay */
			break;
		default:
			/* get rid of existing socket, and retry */
			xs_close(xprt);
			break;
		}
	}
out:
	xprt_wake_pending_tasks(xprt, status);
out_clear:
	xprt_clear_connecting(xprt);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt_test_and_set_connecting(xprt))
		return;

	if (xprt->sock != NULL) {
		dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n",
				xprt, xprt->reestablish_timeout / HZ);
		schedule_delayed_work(&xprt->connect_worker,
					xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
	} else {
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
		schedule_work(&xprt->connect_worker);

		/* flush_scheduled_work can sleep... */
		if (!RPC_IS_ASYNC(task))
			flush_scheduled_work();
	}
}

static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.connect		= xs_connect,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

static struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.connect		= xs_connect,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

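/*
 * Editor's note (not part of the original file): the UDP table uses the
 * *_cong method variants plus a timer callback because the datagram
 * transport does its own congestion avoidance (xprt_adjust_cwnd) and
 * RTT-based retransmit timeouts, while TCP leaves congestion control to
 * the network layer and uses the plain reserve/release methods with the
 * default retransmit timeout.
 */
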
/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC: setting up udp-ipv4 transport...\n");

	xprt->max_reqs = xprt_udp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_UDP;
	xprt->port = xprt_max_resvport;
	xprt->tsh_size = 0;
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_UDP_CONN_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

	return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC: setting up tcp-ipv4 transport...\n");

	xprt->max_reqs = xprt_tcp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_TCP;
	xprt->port = xprt_max_resvport;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_TCP_CONN_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

	return 0;
}