/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY        (10U)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        u8 *buf = (u8 *) packet;
        int j;

        dprintk("RPC: %s\n", msg);
        for (j = 0; j < count && j < 128; j += 4) {
                if (!(j & 31)) {
                        if (j)
                                dprintk("\n");
                        dprintk("0x%04x ", j);
                }
                dprintk("%02x%02x%02x%02x ",
                        buf[j], buf[j+1], buf[j+2], buf[j+3]);
        }
        dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
        /* NOP */
}
#endif

#define XS_SENDMSG_FLAGS        (MSG_DONTWAIT | MSG_NOSIGNAL)

static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
        struct kvec iov = {
                .iov_base       = xdr->head[0].iov_base + base,
                .iov_len        = len - base,
        };
        struct msghdr msg = {
                .msg_name       = addr,
                .msg_namelen    = addrlen,
                .msg_flags      = XS_SENDMSG_FLAGS,
        };

        if (xdr->len > len)
                msg.msg_flags |= MSG_MORE;

        if (likely(iov.iov_len))
                return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
        return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
        struct kvec iov = {
                .iov_base       = xdr->tail[0].iov_base + base,
                .iov_len        = len - base,
        };
        struct msghdr msg = {
                .msg_flags      = XS_SENDMSG_FLAGS,
        };

        return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

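/*
 * Note on the helpers above: kernel_sendmsg() may return a short
 * count when the socket buffer fills, so callers must compare the
 * result against the requested length.  The MSG_MORE hint tells the
 * stream layer that more data follows immediately, letting TCP
 * coalesce the head, page, and tail pieces into fewer segments.
 */
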
/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
        struct page **ppage = xdr->pages;
        unsigned int len, pglen = xdr->page_len;
        int err, ret = 0;
        ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

        if (unlikely(!sock))
                return -ENOTCONN;

        clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

        len = xdr->head[0].iov_len;
        if (base < len || (addr != NULL && base == 0)) {
                err = xs_send_head(sock, addr, addrlen, xdr, base, len);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != (len - base))
                        goto out;
                base = 0;
        } else
                base -= len;

        if (unlikely(pglen == 0))
                goto copy_tail;
        if (unlikely(base >= pglen)) {
                base -= pglen;
                goto copy_tail;
        }
        if (base || xdr->page_base) {
                pglen -= base;
                base += xdr->page_base;
                ppage += base >> PAGE_CACHE_SHIFT;
                base &= ~PAGE_CACHE_MASK;
        }

        sendpage = sock->ops->sendpage ? : sock_no_sendpage;
        do {
                int flags = XS_SENDMSG_FLAGS;

                len = PAGE_CACHE_SIZE;
                if (base)
                        len -= base;
                if (pglen < len)
                        len = pglen;

                if (pglen != len || xdr->tail[0].iov_len != 0)
                        flags |= MSG_MORE;

                /* Hmm... We might be dealing with highmem pages */
                if (PageHighMem(*ppage))
                        sendpage = sock_no_sendpage;
                err = sendpage(sock, *ppage, base, len, flags);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
                if (err != len)
                        goto out;
                base = 0;
                ppage++;
        } while ((pglen -= len) != 0);
copy_tail:
        len = xdr->tail[0].iov_len;
        if (base < len) {
                err = xs_send_tail(sock, xdr, base, len);
                if (ret == 0)
                        ret = err;
                else if (err > 0)
                        ret += err;
        }
out:
        return ret;
}

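/*
 * Accounting in xs_sendpages(): ret accumulates the bytes moved by
 * the head, page, and tail sends, while the first error replaces a
 * still-zero ret; any short write (err != len) exits early so the
 * caller can resume from the recorded offset.  For highmem pages the
 * code falls back to sock_no_sendpage(), which copies the data via
 * sendmsg instead of handing a page reference to the lower layers.
 */
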
/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
                        task->tk_pid, req->rq_slen - req->rq_bytes_sent,
                        req->rq_slen);

        if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
                /* Protect against races with write_space */
                spin_lock_bh(&xprt->transport_lock);

                /* Don't race with disconnect */
                if (!xprt_connected(xprt))
                        task->tk_status = -ENOTCONN;
                else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
                        xprt_wait_for_buffer_space(task);

                spin_unlock_bh(&xprt->transport_lock);
        } else
                /* Keep holding the socket if it is blocked */
                rpc_delay(task, HZ>>4);
}

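/*
 * Flag protocol: xs_sendpages() clears SOCK_ASYNC_NOSPACE before
 * transmitting, and the socket layer sets it again (together with
 * SOCK_NOSPACE) when a non-blocking send finds the buffer full.  If
 * it is still set here, the write_space callback has not fired yet,
 * so queueing the task for a buffer-space wakeup is safe; otherwise
 * space may already have been freed, and a short fixed delay avoids
 * sleeping through that missed wakeup.
 */
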
/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status;

        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
                                req->rq_svec->iov_len);

        req->rq_xtime = jiffies;
        status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
                                sizeof(xprt->addr), xdr, req->rq_bytes_sent);

        dprintk("RPC: xs_udp_send_request(%u) = %d\n",
                        xdr->len - req->rq_bytes_sent, status);

        if (likely(status >= (int) req->rq_slen))
                return 0;

        /* Still some bytes left; set up for a retry later. */
        if (status > 0)
                status = -EAGAIN;

        switch (status) {
        case -ENETUNREACH:
        case -EPIPE:
        case -ECONNREFUSED:
                /* When the server has died, an ICMP port unreachable message
                 * prompts ECONNREFUSED. */
                break;
        case -EAGAIN:
                xs_nospace(task);
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
                break;
        }

        return status;
}

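/*
 * Unlike the TCP path below, a UDP request is never resumed
 * mid-datagram: a partial result from xs_sendpages() is folded into
 * -EAGAIN and the whole datagram is sent again on the next call,
 * since UDP offers no way to append to an already-queued packet.
 */
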
static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
        u32 reclen = buf->len - sizeof(rpc_fraghdr);
        rpc_fraghdr *base = buf->head[0].iov_base;
        *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

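/*
 * RPC over TCP frames each message with a 4-byte record marker
 * (RFC 1831 record marking): the high bit flags the last fragment
 * of a record, and the low 31 bits give the fragment length.  A
 * final fragment of 100 bytes, for example, carries the marker
 * 0x80000064.  The marker is written into space reserved at the
 * head of the send buffer (see tsh_size in xs_setup_tcp() below).
 */
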
/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:    The request has been sent
 *   EAGAIN:    The socket was blocked, please call again later to
 *              complete the request
 * ENOTCONN:    Caller needs to invoke connect logic then call again
 *    other:    Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *      if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        struct xdr_buf *xdr = &req->rq_snd_buf;
        int status, retry = 0;

        xs_encode_tcp_record_marker(&req->rq_snd_buf);

        xs_pktdump("packet data:",
                                req->rq_svec->iov_base,
                                req->rq_svec->iov_len);

        /* Continue transmitting the packet/record. We must be careful
         * to cope with writespace callbacks arriving _after_ we have
         * called sendmsg(). */
        while (1) {
                req->rq_xtime = jiffies;
                status = xs_sendpages(xprt->sock, NULL, 0, xdr,
                                                req->rq_bytes_sent);

                dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
                                xdr->len - req->rq_bytes_sent, status);

                if (unlikely(status < 0))
                        break;

                /* If we've sent the entire packet, immediately
                 * reset the count of bytes sent. */
                req->rq_bytes_sent += status;
                if (likely(req->rq_bytes_sent >= req->rq_slen)) {
                        req->rq_bytes_sent = 0;
                        return 0;
                }

                status = -EAGAIN;
                if (retry++ > XS_SENDMSG_RETRY)
                        break;
        }

        switch (status) {
        case -EAGAIN:
                xs_nospace(task);
                break;
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ENOTCONN:
        case -EPIPE:
                status = -ENOTCONN;
                break;
        default:
                dprintk("RPC: sendmsg returned unrecognized error %d\n",
                        -status);
                xprt_disconnect(xprt);
                break;
        }

        return status;
}

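/*
 * req->rq_bytes_sent is the resume point for a partially sent
 * record: when -EAGAIN is returned above, a later call re-enters
 * xs_sendpages() at that offset instead of resending the record
 * marker and header from scratch.  The XS_SENDMSG_RETRY bound keeps
 * a slowly draining socket from pinning the transmit path in the
 * loop indefinitely.
 */
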
/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
 */
static void xs_close(struct rpc_xprt *xprt)
{
        struct socket *sock = xprt->sock;
        struct sock *sk = xprt->inet;

        if (!sk)
                return;

        dprintk("RPC: xs_close xprt %p\n", xprt);

        write_lock_bh(&sk->sk_callback_lock);
        xprt->inet = NULL;
        xprt->sock = NULL;

        sk->sk_user_data = NULL;
        sk->sk_data_ready = xprt->old_data_ready;
        sk->sk_state_change = xprt->old_state_change;
        sk->sk_write_space = xprt->old_write_space;
        write_unlock_bh(&sk->sk_callback_lock);

        sk->sk_no_check = 0;

        sock_release(sock);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
        dprintk("RPC: xs_destroy xprt %p\n", xprt);

        cancel_delayed_work(&xprt->connect_worker);
        flush_scheduled_work();

        xprt_disconnect(xprt);
        xs_close(xprt);
        kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
        return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
        struct rpc_task *task;
        struct rpc_xprt *xprt;
        struct rpc_rqst *rovr;
        struct sk_buff *skb;
        int err, repsize, copied;
        u32 _xid, *xp;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC: xs_udp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;

        if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
                goto out;

        if (xprt->shutdown)
                goto dropit;

        repsize = skb->len - sizeof(struct udphdr);
        if (repsize < 4) {
                dprintk("RPC: impossible RPC reply size %d!\n", repsize);
                goto dropit;
        }

        /* Copy the XID from the skb... */
        xp = skb_header_pointer(skb, sizeof(struct udphdr),
                                sizeof(_xid), &_xid);
        if (xp == NULL)
                goto dropit;

        /* Look up and lock the request corresponding to the given XID */
        spin_lock(&xprt->transport_lock);
        rovr = xprt_lookup_rqst(xprt, *xp);
        if (!rovr)
                goto out_unlock;
        task = rovr->rq_task;

        if ((copied = rovr->rq_private_buf.buflen) > repsize)
                copied = repsize;

        /* Suck it into the iovec, verify checksum if not done by hw. */
        if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
                goto out_unlock;

        /* Something worked... */
        dst_confirm(skb->dst);

        xprt_adjust_cwnd(task, copied);
        xprt_update_rtt(task);
        xprt_complete_rqst(task, copied);

 out_unlock:
        spin_unlock(&xprt->transport_lock);
 dropit:
        skb_free_datagram(sk, skb);
 out:
        read_unlock(&sk->sk_callback_lock);
}

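/*
 * The UDP reply path above runs in softirq context, which is why it
 * takes transport_lock with plain spin_lock() while process-context
 * paths use spin_lock_bh().  Holding the lock across the copy keeps
 * the request from being retried or freed while its reply is being
 * consumed; xprt_adjust_cwnd() then grows the Van Jacobson congestion
 * window, and xprt_update_rtt() feeds the estimator consulted by
 * xprt_set_retrans_timeout_rtt().
 */
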
static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
        if (len > desc->count)
                len = desc->count;
        if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
                dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
                                len, desc->count);
                return 0;
        }
        desc->offset += len;
        desc->count -= len;
        dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
                        len, desc->count);
        return len;
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len, used;
        char *p;

        p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
        len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
        used = xs_tcp_copy_data(desc, p, len);
        xprt->tcp_offset += used;
        if (used != len)
                return;

        xprt->tcp_reclen = ntohl(xprt->tcp_recm);
        if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
                xprt->tcp_flags |= XPRT_LAST_FRAG;
        else
                xprt->tcp_flags &= ~XPRT_LAST_FRAG;
        xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

        xprt->tcp_flags &= ~XPRT_COPY_RECM;
        xprt->tcp_offset = 0;

        /* Sanity check of the record length */
        if (unlikely(xprt->tcp_reclen < 4)) {
                dprintk("RPC: invalid TCP record fragment length\n");
                xprt_disconnect(xprt);
                return;
        }
        dprintk("RPC: reading TCP record fragment of length %d\n",
                        xprt->tcp_reclen);
}

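/*
 * A record marker can itself straddle skbs, so tcp_offset counts how
 * many of its four bytes have been assembled; the marker is decoded
 * only once all of sizeof(tcp_recm) has arrived.  A record shorter
 * than four bytes cannot even hold an XID, so such a length signals
 * a corrupt stream and forces a disconnect.
 */
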
static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
        dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
                        xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
        if (xprt->tcp_offset == xprt->tcp_reclen) {
                xprt->tcp_flags |= XPRT_COPY_RECM;
                xprt->tcp_offset = 0;
                if (xprt->tcp_flags & XPRT_LAST_FRAG) {
                        xprt->tcp_flags &= ~XPRT_COPY_DATA;
                        xprt->tcp_flags |= XPRT_COPY_XID;
                        xprt->tcp_copied = 0;
                }
        }
}

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len, used;
        char *p;

        len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
        dprintk("RPC: reading XID (%Zu bytes)\n", len);
        p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
        used = xs_tcp_copy_data(desc, p, len);
        xprt->tcp_offset += used;
        if (used != len)
                return;
        xprt->tcp_flags &= ~XPRT_COPY_XID;
        xprt->tcp_flags |= XPRT_COPY_DATA;
        xprt->tcp_copied = 4;
        dprintk("RPC: reading reply for XID %08x\n",
                        ntohl(xprt->tcp_xid));
        xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        struct rpc_rqst *req;
        struct xdr_buf *rcvbuf;
        size_t len;
        ssize_t r;

        /* Find and lock the request corresponding to this xid */
        spin_lock(&xprt->transport_lock);
        req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
        if (!req) {
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
                dprintk("RPC: XID %08x request not found!\n",
                                ntohl(xprt->tcp_xid));
                spin_unlock(&xprt->transport_lock);
                return;
        }

        rcvbuf = &req->rq_private_buf;
        len = desc->count;
        if (len > xprt->tcp_reclen - xprt->tcp_offset) {
                skb_reader_t my_desc;

                len = xprt->tcp_reclen - xprt->tcp_offset;
                memcpy(&my_desc, desc, sizeof(my_desc));
                my_desc.count = len;
                r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
                                          &my_desc, xs_tcp_copy_data);
                desc->count -= r;
                desc->offset += r;
        } else
                r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
                                          desc, xs_tcp_copy_data);

        if (r > 0) {
                xprt->tcp_copied += r;
                xprt->tcp_offset += r;
        }
        if (r != len) {
                /* Error when copying to the receive buffer,
                 * usually because we weren't able to allocate
                 * additional buffer pages. All we can do now
                 * is turn off XPRT_COPY_DATA, so the request
                 * will not receive any additional updates,
                 * and time out.
                 * Any remaining data from this record will
                 * be discarded.
                 */
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
                dprintk("RPC: XID %08x truncated request\n",
                                ntohl(xprt->tcp_xid));
                dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
                                xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
                goto out;
        }

        dprintk("RPC: XID %08x read %Zd bytes\n",
                        ntohl(xprt->tcp_xid), r);
        dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
                        xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

        if (xprt->tcp_copied == req->rq_private_buf.buflen)
                xprt->tcp_flags &= ~XPRT_COPY_DATA;
        else if (xprt->tcp_offset == xprt->tcp_reclen) {
                if (xprt->tcp_flags & XPRT_LAST_FRAG)
                        xprt->tcp_flags &= ~XPRT_COPY_DATA;
        }

out:
        if (!(xprt->tcp_flags & XPRT_COPY_DATA))
                xprt_complete_rqst(req->rq_task, xprt->tcp_copied);
        spin_unlock(&xprt->transport_lock);
        xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
        size_t len;

        len = xprt->tcp_reclen - xprt->tcp_offset;
        if (len > desc->count)
                len = desc->count;
        desc->count -= len;
        desc->offset += len;
        xprt->tcp_offset += len;
        dprintk("RPC: discarded %Zu bytes\n", len);
        xs_tcp_check_recm(xprt);
}

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
        struct rpc_xprt *xprt = rd_desc->arg.data;
        skb_reader_t desc = {
                .skb    = skb,
                .offset = offset,
                .count  = len,
                .csum   = 0
        };

        dprintk("RPC: xs_tcp_data_recv started\n");
        do {
                /* Read in a new fragment marker if necessary */
                /* Can we ever really expect to get completely empty fragments? */
                if (xprt->tcp_flags & XPRT_COPY_RECM) {
                        xs_tcp_read_fraghdr(xprt, &desc);
                        continue;
                }
                /* Read in the xid if necessary */
                if (xprt->tcp_flags & XPRT_COPY_XID) {
                        xs_tcp_read_xid(xprt, &desc);
                        continue;
                }
                /* Read in the request data */
                if (xprt->tcp_flags & XPRT_COPY_DATA) {
                        xs_tcp_read_request(xprt, &desc);
                        continue;
                }
                /* Skip over any trailing bytes on short reads */
                xs_tcp_read_discard(xprt, &desc);
        } while (desc.count);
        dprintk("RPC: xs_tcp_data_recv done\n");
        return len - desc.count;
}

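/*
 * Together the helpers above form a small state machine keyed off
 * xprt->tcp_flags:
 *
 *      XPRT_COPY_RECM -> read the 4-byte record marker
 *      XPRT_COPY_XID  -> read the XID that opens the fragment
 *      XPRT_COPY_DATA -> copy payload into the matching request
 *      (none set)     -> discard bytes for which no request exists
 *
 * Each state consumes what it can of the current skb and hands off
 * to the next, so a reply may be reassembled from any number of
 * skbs delivered through tcp_read_sock() below.
 */
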
/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
        struct rpc_xprt *xprt;
        read_descriptor_t rd_desc;

        read_lock(&sk->sk_callback_lock);
        dprintk("RPC: xs_tcp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
                goto out;

        /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
        rd_desc.arg.data = xprt;
        rd_desc.count = 65536;
        tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
        struct rpc_xprt *xprt;

        read_lock(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
        dprintk("RPC: state %x conn %d dead %d zapped %d\n",
                        sk->sk_state, xprt_connected(xprt),
                        sock_flag(sk, SOCK_DEAD),
                        sock_flag(sk, SOCK_ZAPPED));

        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
                spin_lock_bh(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
                        /* Reset TCP record info */
                        xprt->tcp_offset = 0;
                        xprt->tcp_reclen = 0;
                        xprt->tcp_copied = 0;
                        xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
                        xprt_wake_pending_tasks(xprt, 0);
                }
                spin_unlock_bh(&xprt->transport_lock);
                break;
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                break;
        default:
                xprt_disconnect(xprt);
                break;
        }
out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }

out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                struct socket *sock;
                struct rpc_xprt *xprt;

                if (unlikely(!(sock = sk->sk_socket)))
                        goto out;
                if (unlikely(!(xprt = xprt_from_sock(sk))))
                        goto out;
                if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
                        goto out;

                xprt_write_space(xprt);
        }

out:
        read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Set socket send and receive limits based on the
 * sndsize and rcvsize fields in the generic transport
 * structure.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt)
{
        struct sock *sk = xprt->inet;

        if (xprt->rcvsize) {
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
        }
        if (xprt->sndsize) {
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
                sk->sk_write_space(sk);
        }
}

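/*
 * The limits scale with the slot table: each of the max_reqs
 * requests that may be in flight can need sndsize/rcvsize bytes
 * queued at once; the extra factor of two presumably leaves headroom
 * for the socket layer's per-skb overhead accounting.  The
 * SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits keep auto-tuning from
 * overriding these values.
 */
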
/**
 * xs_tcp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Nothing to do for TCP.
 */
static void xs_tcp_set_buffer_size(struct rpc_xprt *xprt)
{
        return;
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
        xprt_adjust_cwnd(task, -ETIMEDOUT);
}

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
        struct sockaddr_in myaddr = {
                .sin_family = AF_INET,
        };
        int err;
        unsigned short port = xprt->port;

        do {
                myaddr.sin_port = htons(port);
                err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
                                                sizeof(myaddr));
                if (err == 0) {
                        xprt->port = port;
                        dprintk("RPC: xs_bindresvport bound to port %u\n",
                                        port);
                        return 0;
                }
                if (port <= xprt_min_resvport)
                        port = xprt_max_resvport;
                else
                        port--;
        } while (err == -EADDRINUSE && port != xprt->port);

        dprintk("RPC: can't bind to reserved port (%d).\n", -err);
        return err;
}

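/*
 * The search above walks downward from xprt->port, wraps from
 * xprt_min_resvport back to xprt_max_resvport, and gives up once it
 * has cycled around to its starting point.  Binding to a reserved
 * (privileged) source port lets traditional NFS servers verify that
 * the request came from a root-owned process on the client.
 */
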
/**
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_udp_connect_worker(void *args)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *) args;
        struct socket *sock = xprt->sock;
        int err, status = -EIO;

        if (xprt->shutdown || xprt->addr.sin_port == 0)
                goto out;

        dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt);

        /* Start by resetting any existing state */
        xs_close(xprt);

        if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
                dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
                goto out;
        }

        if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                sock_release(sock);
                goto out;
        }

        if (!xprt->inet) {
                struct sock *sk = sock->sk;

                write_lock_bh(&sk->sk_callback_lock);

                sk->sk_user_data = xprt;
                xprt->old_data_ready = sk->sk_data_ready;
                xprt->old_state_change = sk->sk_state_change;
                xprt->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_udp_data_ready;
                sk->sk_write_space = xs_udp_write_space;
                sk->sk_no_check = UDP_CSUM_NORCV;

                xprt_set_connected(xprt);

                /* Reset to new socket */
                xprt->sock = sock;
                xprt->inet = sk;

                write_unlock_bh(&sk->sk_callback_lock);
        }
        xs_udp_set_buffer_size(xprt);
        status = 0;
out:
        xprt_wake_pending_tasks(xprt, status);
        xprt_clear_connecting(xprt);
}

/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
{
        int result;
        struct socket *sock = xprt->sock;
        struct sockaddr any;

        dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);

        /*
         * Disconnect the transport socket by doing a connect operation
         * with AF_UNSPEC.  This should return immediately...
         */
        memset(&any, 0, sizeof(any));
        any.sa_family = AF_UNSPEC;
        result = sock->ops->connect(sock, &any, sizeof(any), 0);
        if (result)
                dprintk("RPC: AF_UNSPEC connect return code %d\n",
                                result);
}

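/*
 * Connecting with AF_UNSPEC is the standard BSD-socket idiom for
 * dissolving an association: the kernel tears the connection down
 * but leaves the socket bound to its local port.  Reconnecting from
 * the same (address, port) pair lets the server's duplicate reply
 * cache (DRC) match retransmissions sent before the reconnect.
 */
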
/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker(void *args)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *)args;
        struct socket *sock = xprt->sock;
        int err, status = -EIO;

        if (xprt->shutdown || xprt->addr.sin_port == 0)
                goto out;

        dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt);

        if (!xprt->sock) {
                /* start from scratch */
                if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
                        dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
                        goto out;
                }

                if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                        sock_release(sock);
                        goto out;
                }
        } else
                /* "close" the socket, preserving the local port */
                xs_tcp_reuse_connection(xprt);

        if (!xprt->inet) {
                struct sock *sk = sock->sk;

                write_lock_bh(&sk->sk_callback_lock);

                sk->sk_user_data = xprt;
                xprt->old_data_ready = sk->sk_data_ready;
                xprt->old_state_change = sk->sk_state_change;
                xprt->old_write_space = sk->sk_write_space;
                sk->sk_data_ready = xs_tcp_data_ready;
                sk->sk_state_change = xs_tcp_state_change;
                sk->sk_write_space = xs_tcp_write_space;

                /* socket options */
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
                sock_reset_flag(sk, SOCK_LINGER);
                tcp_sk(sk)->linger2 = 0;
                tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

                xprt_clear_connected(xprt);

                /* Reset to new socket */
                xprt->sock = sock;
                xprt->inet = sk;

                write_unlock_bh(&sk->sk_callback_lock);
        }

        /* Tell the socket layer to start connecting... */
        status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
                        sizeof(xprt->addr), O_NONBLOCK);
        dprintk("RPC: %p connect status %d connected %d sock state %d\n",
                        xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
        if (status < 0) {
                switch (status) {
                case -EINPROGRESS:
                case -EALREADY:
                        goto out_clear;
                case -ECONNREFUSED:
                case -ECONNRESET:
                        /* retry with existing socket, after a delay */
                        break;
                default:
                        /* get rid of existing socket, and retry */
                        xs_close(xprt);
                        break;
                }
        }
out:
        xprt_wake_pending_tasks(xprt, status);
out_clear:
        xprt_clear_connecting(xprt);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 */
static void xs_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        if (xprt_test_and_set_connecting(xprt))
                return;

        if (xprt->sock != NULL) {
                dprintk("RPC: xs_connect delayed xprt %p\n", xprt);
                schedule_delayed_work(&xprt->connect_worker,
                                RPC_REESTABLISH_TIMEOUT);
        } else {
                dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
                schedule_work(&xprt->connect_worker);

                /* flush_scheduled_work can sleep... */
                if (!RPC_IS_ASYNC(task))
                        flush_scheduled_work();
        }
}

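/*
 * A transport that already has a socket reconnects only after
 * RPC_REESTABLISH_TIMEOUT: an immediate reconnect to a server that
 * just dropped the connection is likely to fail again straight away,
 * so the delay acts as a simple backoff.  A first-time connect is
 * scheduled immediately.
 */
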
static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
        .connect                = xs_connect,
        .send_request           = xs_udp_send_request,
        .set_retrans_timeout    = xprt_set_retrans_timeout_rtt,
        .timer                  = xs_udp_timer,
        .release_request        = xprt_release_rqst_cong,
        .close                  = xs_close,
        .destroy                = xs_destroy,
};

static struct rpc_xprt_ops xs_tcp_ops = {
        .set_buffer_size        = xs_tcp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xprt_release_xprt,
        .connect                = xs_connect,
        .send_request           = xs_tcp_send_request,
        .set_retrans_timeout    = xprt_set_retrans_timeout_def,
        .close                  = xs_close,
        .destroy                = xs_destroy,
};

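/*
 * The two ops tables differ where datagram semantics demand it: the
 * UDP transport uses the *_cong variants, which gate transmission on
 * the Van Jacobson congestion window kept by xprt_adjust_cwnd(), and
 * hooks .timer and .release_request so timeouts and completions
 * update that window.  TCP leaves congestion control to the socket
 * layer, so the plain reserve/release variants suffice.
 */
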
/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
        size_t slot_table_size;

        dprintk("RPC: setting up udp-ipv4 transport...\n");

        xprt->max_reqs = xprt_udp_slot_table_entries;
        slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
        xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
        if (xprt->slot == NULL)
                return -ENOMEM;
        memset(xprt->slot, 0, slot_table_size);

        xprt->prot = IPPROTO_UDP;
        xprt->port = xprt_max_resvport;
        xprt->tsh_size = 0;
        xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
        /* XXX: header size can vary due to auth type, IPv6, etc. */
        xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

        INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);

        xprt->ops = &xs_udp_ops;

        if (to)
                xprt->timeout = *to;
        else
                xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

        return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to: timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
        size_t slot_table_size;

        dprintk("RPC: setting up tcp-ipv4 transport...\n");

        xprt->max_reqs = xprt_tcp_slot_table_entries;
        slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
        xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
        if (xprt->slot == NULL)
                return -ENOMEM;
        memset(xprt->slot, 0, slot_table_size);

        xprt->prot = IPPROTO_TCP;
        xprt->port = xprt_max_resvport;
        xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
        xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
        xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

        INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);

        xprt->ops = &xs_tcp_ops;

        if (to)
                xprt->timeout = *to;
        else
                xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

        return 0;
}