/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * xprtsock tunables
 */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY	(10U)

/*
 * Time out for an RPC UDP socket connect.  UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
 */
#define XS_UDP_CONN_TO		(5U * HZ)

/*
 * Wait duration for an RPC TCP connection to be established.  Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
 */
#define XS_TCP_CONN_TO		(60U * HZ)

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
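
/*
 * Illustration (commentary, not part of the original source): with the
 * values above, failed TCP reconnect attempts back off as 3s, 6s, 12s,
 * 24s, and so on; xs_connect() below doubles the delay after each
 * attempt until it is clamped at the 5 minute XS_TCP_MAX_REEST_TO cap.
 */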

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        u8 *buf = (u8 *) packet;
        int j;

        dprintk("RPC: %s\n", msg);
        for (j = 0; j < count && j < 128; j += 4) {
                if (!(j & 31)) {
                        if (j)
                                dprintk("\n");
                        dprintk("0x%04x ", j);
                }
                dprintk("%02x%02x%02x%02x ",
                        buf[j], buf[j+1], buf[j+2], buf[j+3]);
        }
        dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        /* NOP */
}
#endif

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
        struct kvec iov = {
                .iov_base = xdr->head[0].iov_base + base,
                .iov_len  = len - base,
        };
        struct msghdr msg = {
                .msg_name    = addr,
                .msg_namelen = addrlen,
                .msg_flags   = XS_SENDMSG_FLAGS,
        };

        if (xdr->len > len)
                msg.msg_flags |= MSG_MORE;

        if (likely(iov.iov_len))
                return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
        return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
        struct kvec iov = {
                .iov_base = xdr->tail[0].iov_base + base,
                .iov_len  = len - base,
        };
        struct msghdr msg = {
                .msg_flags = XS_SENDMSG_FLAGS,
        };

        return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
        struct page **ppage = xdr->pages;
        unsigned int len, pglen = xdr->page_len;
        int err, ret = 0;
        ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

        if (unlikely(!sock))
                return -ENOTCONN;

        clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

        len = xdr->head[0].iov_len;
        if (base < len || (addr != NULL && base == 0)) {
                err = xs_send_head(sock, addr, addrlen, xdr, base, len);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != (len - base))
                        goto out;
                base = 0;
        } else
                base -= len;

        if (unlikely(pglen == 0))
                goto copy_tail;
        if (unlikely(base >= pglen)) {
                base -= pglen;
                goto copy_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }

        sendpage = sock->ops->sendpage ? : sock_no_sendpage;
        do {
                int flags = XS_SENDMSG_FLAGS;

                len = PAGE_CACHE_SIZE;
                if (base)
                        len -= base;
                if (pglen < len)
                        len = pglen;

                if (pglen != len || xdr->tail[0].iov_len != 0)
                        flags |= MSG_MORE;

                /* Hmm... We might be dealing with highmem pages */
                if (PageHighMem(*ppage))
                        sendpage = sock_no_sendpage;
                err = sendpage(sock, *ppage, base, len, flags);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != len)
                        goto out;
                base = 0;
                ppage++;
        } while ((pglen -= len) != 0);
copy_tail:
        len = xdr->tail[0].iov_len;
        if (base < len) {
                err = xs_send_tail(sock, xdr, base, len);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
        }
out:
        return ret;
}
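
/*
 * A note on the partial-send protocol (commentary, not part of the
 * original source): xs_sendpages() returns the number of bytes queued,
 * or a negative errno.  Callers such as xs_tcp_send_request() record
 * the count in req->rq_bytes_sent and pass it back in as @base, so a
 * retry resumes mid-head, mid-page, or mid-tail instead of resending
 * bytes the socket has already accepted.
 */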

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
                        req->rq_slen);

        if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
                /* Protect against races with write_space */
                spin_lock_bh(&xprt->transport_lock);

                /* Don't race with disconnect */
                if (!xprt_connected(xprt))
                        task->tk_status = -ENOTCONN;
                else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
                        xprt_wait_for_buffer_space(task);

                spin_unlock_bh(&xprt->transport_lock);
        } else
                /* Keep holding the socket if it is blocked */
                rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status;

        xs_pktdump("packet data:",
                        req->rq_svec->iov_base,
                        req->rq_svec->iov_len);

        req->rq_xtime = jiffies;
        status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
                                sizeof(xprt->addr), xdr, req->rq_bytes_sent);

        dprintk("RPC: xs_udp_send_request(%u) = %d\n",
                        xdr->len - req->rq_bytes_sent, status);

        if (likely(status >= (int) req->rq_slen))
                return 0;

        /* Still some bytes left; set up for a retry later. */
        if (status > 0)
                status = -EAGAIN;

        switch (status) {
        case -ENETUNREACH:
        case -EPIPE:
        case -ECONNREFUSED:
                /* When the server has died, an ICMP port unreachable message
                 * prompts ECONNREFUSED. */
                break;
        case -EAGAIN:
                xs_nospace(task);
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
                break;
        }

        return status;
}

static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
        u32 reclen = buf->len - sizeof(rpc_fraghdr);
        rpc_fraghdr *base = buf->head[0].iov_base;
        *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}
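
/*
 * Record marking illustration (commentary, not part of the original
 * source): RPC over TCP prefixes each message with a 4-byte marker in
 * network byte order; the top bit flags the last fragment of a record
 * and the low 31 bits carry the fragment length.  A message whose
 * payload is 100 bytes (not counting the marker itself) therefore
 * carries the marker 0x80000064.
 */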

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status, retry = 0;

        xs_encode_tcp_record_marker(&req->rq_snd_buf);

        xs_pktdump("packet data:",
                        req->rq_svec->iov_base,
                        req->rq_svec->iov_len);

        /* Continue transmitting the packet/record.  We must be careful
         * to cope with writespace callbacks arriving _after_ we have
         * called sendmsg(). */
        while (1) {
                req->rq_xtime = jiffies;
                status = xs_sendpages(xprt->sock, NULL, 0, xdr,
                                        req->rq_bytes_sent);

                dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
                                xdr->len - req->rq_bytes_sent, status);

                if (unlikely(status < 0))
                        break;

                /* If we've sent the entire packet, immediately
                 * reset the count of bytes sent. */
                req->rq_bytes_sent += status;
                if (likely(req->rq_bytes_sent >= req->rq_slen)) {
                        req->rq_bytes_sent = 0;
                        return 0;
                }

                status = -EAGAIN;
                if (retry++ > XS_SENDMSG_RETRY)
                        break;
        }

        switch (status) {
        case -EAGAIN:
                xs_nospace(task);
                break;
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ENOTCONN:
        case -EPIPE:
                status = -ENOTCONN;
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
                xprt_disconnect(xprt);
                break;
        }

        return status;
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
        struct socket *sock = xprt->sock;
        struct sock *sk = xprt->inet;

        if (!sk)
                return;

        dprintk("RPC: xs_close xprt %p\n", xprt);

        write_lock_bh(&sk->sk_callback_lock);
        xprt->inet = NULL;
        xprt->sock = NULL;

        sk->sk_user_data = NULL;
        sk->sk_data_ready = xprt->old_data_ready;
        sk->sk_state_change = xprt->old_state_change;
        sk->sk_write_space = xprt->old_write_space;
        write_unlock_bh(&sk->sk_callback_lock);

        sk->sk_no_check = 0;

        sock_release(sock);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
        dprintk("RPC: xs_destroy xprt %p\n", xprt);

        cancel_delayed_work(&xprt->connect_worker);
        flush_scheduled_work();

        xprt_disconnect(xprt);
        xs_close(xprt);
        kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
        return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
        struct rpc_task *task;
        struct rpc_xprt *xprt;
        struct rpc_rqst *rovr;
        struct sk_buff *skb;
        int err, repsize, copied;
        u32 _xid, *xp;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC: xs_udp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;

        if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
                goto out;

        if (xprt->shutdown)
                goto dropit;

        repsize = skb->len - sizeof(struct udphdr);
        if (repsize < 4) {
                dprintk("RPC: impossible RPC reply size %d!\n", repsize);
                goto dropit;
        }

        /* Copy the XID from the skb... */
        xp = skb_header_pointer(skb, sizeof(struct udphdr),
                                sizeof(_xid), &_xid);
        if (xp == NULL)
                goto dropit;

        /* Look up and lock the request corresponding to the given XID */
        spin_lock(&xprt->transport_lock);
        rovr = xprt_lookup_rqst(xprt, *xp);
        if (!rovr)
                goto out_unlock;
        task = rovr->rq_task;

        if ((copied = rovr->rq_private_buf.buflen) > repsize)
                copied = repsize;

        /* Suck it into the iovec, verify checksum if not done by hw. */
        if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
                goto out_unlock;

        /* Something worked... */
        dst_confirm(skb->dst);

        xprt_adjust_cwnd(task, copied);
        xprt_update_rtt(task);
        xprt_complete_rqst(task, copied);

out_unlock:
        spin_unlock(&xprt->transport_lock);
dropit:
        skb_free_datagram(sk, skb);
out:
        read_unlock(&sk->sk_callback_lock);
}

static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
        if (len > desc->count)
                len = desc->count;
        if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
                dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
                                len, desc->count);
                return 0;
        }
        desc->offset += len;
        desc->count -= len;
        dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
                        len, desc->count);
        return len;
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len, used;
        char *p;

        p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
        len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
        used = xs_tcp_copy_data(desc, p, len);
        xprt->tcp_offset += used;
        if (used != len)
                return;

        xprt->tcp_reclen = ntohl(xprt->tcp_recm);
        if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
                xprt->tcp_flags |= XPRT_LAST_FRAG;
        else
                xprt->tcp_flags &= ~XPRT_LAST_FRAG;
        xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

        xprt->tcp_flags &= ~XPRT_COPY_RECM;
        xprt->tcp_offset = 0;

        /* Sanity check of the record length */
        if (unlikely(xprt->tcp_reclen < 4)) {
                dprintk("RPC: invalid TCP record fragment length\n");
                xprt_disconnect(xprt);
                return;
        }
        dprintk("RPC: reading TCP record fragment of length %d\n",
                        xprt->tcp_reclen);
}

static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
        dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
                        xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
        if (xprt->tcp_offset == xprt->tcp_reclen) {
                xprt->tcp_flags |= XPRT_COPY_RECM;
                xprt->tcp_offset = 0;
                if (xprt->tcp_flags & XPRT_LAST_FRAG) {
                        xprt->tcp_flags &= ~XPRT_COPY_DATA;
                        xprt->tcp_flags |= XPRT_COPY_XID;
                        xprt->tcp_copied = 0;
                }
        }
}

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len, used;
        char *p;

        len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
        dprintk("RPC: reading XID (%Zu bytes)\n", len);
        p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
        used = xs_tcp_copy_data(desc, p, len);
        xprt->tcp_offset += used;
        if (used != len)
                return;
        xprt->tcp_flags &= ~XPRT_COPY_XID;
        xprt->tcp_flags |= XPRT_COPY_DATA;
        xprt->tcp_copied = 4;
        dprintk("RPC: reading reply for XID %08x\n",
                        ntohl(xprt->tcp_xid));
        xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        struct rpc_rqst *req;
        struct xdr_buf *rcvbuf;
        size_t len;
        ssize_t r;

        /* Find and lock the request corresponding to this xid */
        spin_lock(&xprt->transport_lock);
        req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
        if (!req) {
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
                dprintk("RPC: XID %08x request not found!\n",
                                ntohl(xprt->tcp_xid));
                spin_unlock(&xprt->transport_lock);
                return;
        }

        rcvbuf = &req->rq_private_buf;
        len = desc->count;
        if (len > xprt->tcp_reclen - xprt->tcp_offset) {
                skb_reader_t my_desc;

                len = xprt->tcp_reclen - xprt->tcp_offset;
                memcpy(&my_desc, desc, sizeof(my_desc));
                my_desc.count = len;
                r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
                                        &my_desc, xs_tcp_copy_data);
                desc->count -= r;
                desc->offset += r;
        } else
                r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
                                        desc, xs_tcp_copy_data);

        if (r > 0) {
                xprt->tcp_copied += r;
                xprt->tcp_offset += r;
        }
        if (r != len) {
                /* Error when copying to the receive buffer,
                 * usually because we weren't able to allocate
                 * additional buffer pages.  All we can do now
                 * is turn off XPRT_COPY_DATA, so the request
                 * will not receive any additional updates,
                 * and time out.
                 * Any remaining data from this record will
                 * be discarded.
                 */
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
                dprintk("RPC: XID %08x truncated request\n",
                                ntohl(xprt->tcp_xid));
                dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
                                xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
                goto out;
        }

        dprintk("RPC: XID %08x read %Zd bytes\n",
                        ntohl(xprt->tcp_xid), r);
        dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
                        xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

        if (xprt->tcp_copied == req->rq_private_buf.buflen)
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
        else if (xprt->tcp_offset == xprt->tcp_reclen) {
                if (xprt->tcp_flags & XPRT_LAST_FRAG)
                        xprt->tcp_flags &= ~XPRT_COPY_DATA;
        }

out:
        if (!(xprt->tcp_flags & XPRT_COPY_DATA))
                xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
        spin_unlock(&xprt->transport_lock);
        xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len;

        len = xprt->tcp_reclen - xprt->tcp_offset;
        if (len > desc->count)
                len = desc->count;
        desc->count -= len;
        desc->offset += len;
        xprt->tcp_offset += len;
        dprintk("RPC: discarded %Zu bytes\n", len);
        xs_tcp_check_recm(xprt);
}

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
        struct rpc_xprt *xprt = rd_desc->arg.data;
        skb_reader_t desc = {
                .skb    = skb,
                .offset = offset,
                .count  = len,
                .csum   = 0
        };

        dprintk("RPC: xs_tcp_data_recv started\n");
        do {
                /* Read in a new fragment marker if necessary */
                /* Can we ever really expect to get completely empty fragments? */
                if (xprt->tcp_flags & XPRT_COPY_RECM) {
                        xs_tcp_read_fraghdr(xprt, &desc);
                        continue;
                }
                /* Read in the xid if necessary */
                if (xprt->tcp_flags & XPRT_COPY_XID) {
                        xs_tcp_read_xid(xprt, &desc);
                        continue;
                }
                /* Read in the request data */
                if (xprt->tcp_flags & XPRT_COPY_DATA) {
                        xs_tcp_read_request(xprt, &desc);
                        continue;
                }
                /* Skip over any trailing bytes on short reads */
                xs_tcp_read_discard(xprt, &desc);
        } while (desc.count);
        dprintk("RPC: xs_tcp_data_recv done\n");
        return len - desc.count;
}
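
/*
 * Receive state machine summary (commentary, not part of the original
 * source): each pass through the loop above consumes whichever piece
 * the flags say comes next: XPRT_COPY_RECM reads the 4-byte record
 * marker, XPRT_COPY_XID reads the reply's XID, XPRT_COPY_DATA copies
 * reply bytes into the matching request, and anything else left in the
 * current fragment is discarded.  xs_tcp_check_recm() advances the
 * flags as each fragment boundary is reached.
 */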

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
        struct rpc_xprt *xprt;
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC: xs_tcp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
                goto out;

        /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
        rd_desc.arg.data = xprt;
        rd_desc.count = 65536;
        tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
        read_unlock(&sk->sk_callback_lock);
}
768 | ||
9903cd1c CL |
769 | /** |
770 | * xs_tcp_state_change - callback to handle TCP socket state changes | |
771 | * @sk: socket whose state has changed | |
772 | * | |
773 | */ | |
774 | static void xs_tcp_state_change(struct sock *sk) | |
a246b010 | 775 | { |
9903cd1c | 776 | struct rpc_xprt *xprt; |
a246b010 CL |
777 | |
778 | read_lock(&sk->sk_callback_lock); | |
779 | if (!(xprt = xprt_from_sock(sk))) | |
780 | goto out; | |
9903cd1c | 781 | dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); |
a246b010 CL |
782 | dprintk("RPC: state %x conn %d dead %d zapped %d\n", |
783 | sk->sk_state, xprt_connected(xprt), | |
784 | sock_flag(sk, SOCK_DEAD), | |
785 | sock_flag(sk, SOCK_ZAPPED)); | |
786 | ||
787 | switch (sk->sk_state) { | |
788 | case TCP_ESTABLISHED: | |
4a0f8c04 | 789 | spin_lock_bh(&xprt->transport_lock); |
a246b010 CL |
790 | if (!xprt_test_and_set_connected(xprt)) { |
791 | /* Reset TCP record info */ | |
792 | xprt->tcp_offset = 0; | |
793 | xprt->tcp_reclen = 0; | |
794 | xprt->tcp_copied = 0; | |
795 | xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; | |
03bf4b70 | 796 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
44fbac22 | 797 | xprt_wake_pending_tasks(xprt, 0); |
a246b010 | 798 | } |
4a0f8c04 | 799 | spin_unlock_bh(&xprt->transport_lock); |
a246b010 CL |
800 | break; |
801 | case TCP_SYN_SENT: | |
802 | case TCP_SYN_RECV: | |
803 | break; | |
804 | default: | |
805 | xprt_disconnect(xprt); | |
806 | break; | |
807 | } | |
808 | out: | |
809 | read_unlock(&sk->sk_callback_lock); | |
810 | } | |

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }

out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }

out:
        read_unlock(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
        struct sock *sk = xprt->inet;

        if (xprt->rcvsize) {
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
        }
        if (xprt->sndsize) {
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
                sk->sk_write_space(sk);
        }
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
        xprt->sndsize = 0;
        if (sndsize)
                xprt->sndsize = sndsize + 1024;
        xprt->rcvsize = 0;
        if (rcvsize)
                xprt->rcvsize = rcvsize + 1024;

        xs_udp_do_set_buffer_size(xprt);
}
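
/*
 * Worked example (commentary, not part of the original source): a
 * caller requesting a 4096-byte send buffer on a transport with 16
 * request slots gets xprt->sndsize = 4096 + 1024 = 5120 above, and
 * xs_udp_do_set_buffer_size() then sets sk_sndbuf to
 * 5120 * 16 * 2 = 163840 bytes.
 */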

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
        xprt_adjust_cwnd(task, -ETIMEDOUT);
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
        dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
        xprt->addr.sin_port = htons(port);
}

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
        struct sockaddr_in myaddr = {
                .sin_family = AF_INET,
        };
        int err;
        unsigned short port = xprt->port;

        do {
                myaddr.sin_port = htons(port);
                err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
                                                sizeof(myaddr));
                if (err == 0) {
                        xprt->port = port;
                        dprintk("RPC: xs_bindresvport bound to port %u\n",
                                        port);
                        return 0;
                }
                if (port <= xprt_min_resvport)
                        port = xprt_max_resvport;
                else
                        port--;
        } while (err == -EADDRINUSE && port != xprt->port);

        dprintk("RPC: can't bind to reserved port (%d).\n", -err);
        return err;
}
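
/*
 * Search-order note (commentary, not part of the original source): the
 * bind loop above starts at xprt->port and walks downward one port at
 * a time, wrapping from xprt_min_resvport back up to xprt_max_resvport,
 * and stops with -EADDRINUSE only after coming full circle back to the
 * starting port.
 */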
963 | ||
b0d93ad5 CL |
964 | /** |
965 | * xs_udp_connect_worker - set up a UDP socket | |
966 | * @args: RPC transport to connect | |
967 | * | |
968 | * Invoked by a work queue tasklet. | |
969 | */ | |
970 | static void xs_udp_connect_worker(void *args) | |
a246b010 | 971 | { |
b0d93ad5 CL |
972 | struct rpc_xprt *xprt = (struct rpc_xprt *) args; |
973 | struct socket *sock = xprt->sock; | |
974 | int err, status = -EIO; | |
9903cd1c | 975 | |
b0d93ad5 CL |
976 | if (xprt->shutdown || xprt->addr.sin_port == 0) |
977 | goto out; | |
9903cd1c | 978 | |
b0d93ad5 | 979 | dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt); |
9903cd1c | 980 | |
b0d93ad5 CL |
981 | /* Start by resetting any existing state */ |
982 | xs_close(xprt); | |
9903cd1c | 983 | |
b0d93ad5 CL |
984 | if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { |
985 | dprintk("RPC: can't create UDP transport socket (%d).\n", -err); | |
986 | goto out; | |
987 | } | |
9903cd1c | 988 | |
b0d93ad5 CL |
989 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { |
990 | sock_release(sock); | |
991 | goto out; | |
992 | } | |
9903cd1c | 993 | |
b0d93ad5 CL |
994 | if (!xprt->inet) { |
995 | struct sock *sk = sock->sk; | |
a246b010 | 996 | |
b0d93ad5 | 997 | write_lock_bh(&sk->sk_callback_lock); |
a246b010 | 998 | |
b0d93ad5 CL |
999 | sk->sk_user_data = xprt; |
1000 | xprt->old_data_ready = sk->sk_data_ready; | |
1001 | xprt->old_state_change = sk->sk_state_change; | |
1002 | xprt->old_write_space = sk->sk_write_space; | |
9903cd1c | 1003 | sk->sk_data_ready = xs_udp_data_ready; |
c7b2cae8 | 1004 | sk->sk_write_space = xs_udp_write_space; |
a246b010 | 1005 | sk->sk_no_check = UDP_CSUM_NORCV; |
b079fa7b | 1006 | sk->sk_allocation = GFP_ATOMIC; |
b0d93ad5 | 1007 | |
a246b010 | 1008 | xprt_set_connected(xprt); |
a246b010 | 1009 | |
b0d93ad5 CL |
1010 | /* Reset to new socket */ |
1011 | xprt->sock = sock; | |
1012 | xprt->inet = sk; | |
a246b010 | 1013 | |
b0d93ad5 CL |
1014 | write_unlock_bh(&sk->sk_callback_lock); |
1015 | } | |
470056c2 | 1016 | xs_udp_do_set_buffer_size(xprt); |
b0d93ad5 CL |
1017 | status = 0; |
1018 | out: | |
1019 | xprt_wake_pending_tasks(xprt, status); | |
1020 | xprt_clear_connecting(xprt); | |
a246b010 CL |
1021 | } |

/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
{
        int result;
        struct socket *sock = xprt->sock;
        struct sockaddr any;

        dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);

        /*
         * Disconnect the transport socket by doing a connect operation
         * with AF_UNSPEC.  This should return immediately...
         */
        memset(&any, 0, sizeof(any));
        any.sa_family = AF_UNSPEC;
        result = sock->ops->connect(sock, &any, sizeof(any), 0);
        if (result)
                dprintk("RPC: AF_UNSPEC connect return code %d\n",
                                result);
}

/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker(void *args)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *)args;
        struct socket *sock = xprt->sock;
        int err, status = -EIO;

        if (xprt->shutdown || xprt->addr.sin_port == 0)
                goto out;

        dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt);

        if (!xprt->sock) {
                /* start from scratch */
                if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
                        dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
                        goto out;
                }

                if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                        sock_release(sock);
                        goto out;
                }
        } else
                /* "close" the socket, preserving the local port */
                xs_tcp_reuse_connection(xprt);

        if (!xprt->inet) {
                struct sock *sk = sock->sk;

                write_lock_bh(&sk->sk_callback_lock);

                sk->sk_user_data = xprt;
                xprt->old_data_ready = sk->sk_data_ready;
                xprt->old_state_change = sk->sk_state_change;
                xprt->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_tcp_data_ready;
                sk->sk_state_change = xs_tcp_state_change;
                sk->sk_write_space = xs_tcp_write_space;
                sk->sk_allocation = GFP_ATOMIC;

                /* socket options */
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
                sock_reset_flag(sk, SOCK_LINGER);
                tcp_sk(sk)->linger2 = 0;
                tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

                xprt_clear_connected(xprt);

                /* Reset to new socket */
                xprt->sock = sock;
                xprt->inet = sk;

                write_unlock_bh(&sk->sk_callback_lock);
        }

        /* Tell the socket layer to start connecting... */
        status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
                        sizeof(xprt->addr), O_NONBLOCK);
        dprintk("RPC: %p connect status %d connected %d sock state %d\n",
                        xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
        if (status < 0) {
                switch (status) {
                case -EINPROGRESS:
                case -EALREADY:
                        goto out_clear;
                case -ECONNREFUSED:
                case -ECONNRESET:
                        /* retry with existing socket, after a delay */
                        break;
                default:
                        /* get rid of existing socket, and retry */
                        xs_close(xprt);
                        break;
                }
        }
out:
        xprt_wake_pending_tasks(xprt, status);
out_clear:
        xprt_clear_connecting(xprt);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        if (xprt_test_and_set_connecting(xprt))
                return;

        if (xprt->sock != NULL) {
                dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n",
                                xprt, xprt->reestablish_timeout / HZ);
                schedule_delayed_work(&xprt->connect_worker,
                                        xprt->reestablish_timeout);
                xprt->reestablish_timeout <<= 1;
                if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
        } else {
                dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
                schedule_work(&xprt->connect_worker);

                /* flush_scheduled_work can sleep... */
                if (!RPC_IS_ASYNC(task))
                        flush_scheduled_work();
        }
}

static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
        .buf_alloc              = rpc_malloc,
        .buf_free               = rpc_free,
        .send_request           = xs_udp_send_request,
        .set_retrans_timeout    = xprt_set_retrans_timeout_rtt,
        .timer                  = xs_udp_timer,
        .release_request        = xprt_release_rqst_cong,
        .close                  = xs_close,
        .destroy                = xs_destroy,
};

static struct rpc_xprt_ops xs_tcp_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xprt_release_xprt,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
        .buf_alloc              = rpc_malloc,
        .buf_free               = rpc_free,
        .send_request           = xs_tcp_send_request,
        .set_retrans_timeout    = xprt_set_retrans_timeout_def,
        .close                  = xs_close,
        .destroy                = xs_destroy,
};

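/*
 * Side-by-side note (commentary, not part of the original source): the
 * UDP ops use the congestion-aware reserve/release helpers and an
 * RTT-based retransmit timeout, while the TCP ops rely on the default
 * fixed retransmit timeout and leave congestion control to TCP itself,
 * which is also why xs_tcp_ops has no .timer or .release_request hook.
 */
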
/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
        size_t slot_table_size;

        dprintk("RPC: setting up udp-ipv4 transport...\n");

        xprt->max_reqs = xprt_udp_slot_table_entries;
        slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
        xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
        if (xprt->slot == NULL)
                return -ENOMEM;
        memset(xprt->slot, 0, slot_table_size);

        xprt->prot = IPPROTO_UDP;
        xprt->port = xprt_max_resvport;
        xprt->tsh_size = 0;
        xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
        /* XXX: header size can vary due to auth type, IPv6, etc. */
        xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

        INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_UDP_CONN_TO;
        xprt->reestablish_timeout = XS_UDP_REEST_TO;
        xprt->idle_timeout = XS_IDLE_DISC_TO;

        xprt->ops = &xs_udp_ops;

        if (to)
                xprt->timeout = *to;
        else
                xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

        return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
        size_t slot_table_size;

        dprintk("RPC: setting up tcp-ipv4 transport...\n");

        xprt->max_reqs = xprt_tcp_slot_table_entries;
        slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
        xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
        if (xprt->slot == NULL)
                return -ENOMEM;
        memset(xprt->slot, 0, slot_table_size);

        xprt->prot = IPPROTO_TCP;
        xprt->port = xprt_max_resvport;
        xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
        xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
        xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

        INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);
        xprt->bind_timeout = XS_BIND_TO;
        xprt->connect_timeout = XS_TCP_CONN_TO;
        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
        xprt->idle_timeout = XS_IDLE_DISC_TO;

        xprt->ops = &xs_tcp_ops;

        if (to)
                xprt->timeout = *to;
        else
                xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

        return 0;
}