/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP send fixes (C) 1998 Red Hat Software <alan@redhat.com>
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/file.h>

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

/*
 * Maximum port number to use when requesting a reserved port.
 */
#define XS_MAX_RESVPORT		(800U)

/*
 * How many times to try sending a request on a socket before waiting
 * for the socket buffer to clear.
 */
#define XS_SENDMSG_RETRY	(10U)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->head[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

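	/* More data follows the head (page data and/or a tail), so tell
	 * the network layer not to push this segment out on its own. */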
	if (xdr->len > len)
		msg.msg_flags |= MSG_MORE;

	if (likely(iov.iov_len))
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len)
{
	struct kvec iov = {
		.iov_base	= xdr->tail[0].iov_base + base,
		.iov_len	= len - base,
	};
	struct msghdr msg = {
		.msg_flags	= XS_SENDMSG_FLAGS,
	};

	return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	if (unlikely(!sock))
		return -ENOTCONN;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);

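	/* "ret" accumulates the total bytes sent so far, while "err" holds
	 * the result of the most recent send; a short or failed send stops
	 * the walk through head, pages, and tail so the caller can retry
	 * later from the correct offset. */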
	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		err = xs_send_head(sock, addr, addrlen, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != (len - base))
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

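	/* Prefer the protocol's zero-copy sendpage method, falling back to
	 * sock_no_sendpage (a plain sendmsg) when none is provided. */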
	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = XS_SENDMSG_FLAGS;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		err = xs_send_tail(sock, xdr, base, len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static void xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) {
		/* Protect against races with write_space */
		spin_lock_bh(&xprt->transport_lock);

		/* Don't race with disconnect */
		if (!xprt_connected(xprt))
			task->tk_status = -ENOTCONN;
		else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags))
			xprt_wait_for_buffer_space(task);

		spin_unlock_bh(&xprt->transport_lock);
	} else
		/* Keep holding the socket if it is blocked */
		rpc_delay(task, HZ>>4);
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	req->rq_xtime = jiffies;
	status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr,
				sizeof(xprt->addr), xdr, req->rq_bytes_sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

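	/* A UDP request must fit in a single datagram, so anything short
	 * of the full request length means it was not sent. */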
	if (likely(status >= (int) req->rq_slen))
		return 0;

	/* Still some bytes left; set up for a retry later. */
	if (status > 0)
		status = -EAGAIN;

	switch (status) {
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	case -EAGAIN:
		xs_nospace(task);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		break;
	}

	return status;
}

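/* RPC over TCP record marking: the high bit of the 32-bit marker flags
 * the last fragment of an RPC record, and the remaining 31 bits carry
 * the fragment length (which excludes the marker itself). */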
static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;
	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status, retry = 0;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		req->rq_xtime = jiffies;
		status = xs_sendpages(xprt->sock, NULL, 0, xdr,
						req->rq_bytes_sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

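		/* A partial send: retry a bounded number of times before
		 * backing off to wait for socket buffer space. */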
		status = -EAGAIN;
		if (retry++ > XS_SENDMSG_RETRY)
			break;
	}

	switch (status) {
	case -EAGAIN:
		xs_nospace(task);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		status = -ENOTCONN;
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		xprt_disconnect(xprt);
		break;
	}

	return status;
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct socket *sock = xprt->sock;
	struct sock *sk = xprt->inet;

	if (!sk)
		return;

	dprintk("RPC: xs_close xprt %p\n", xprt);

	write_lock_bh(&sk->sk_callback_lock);
	xprt->inet = NULL;
	xprt->sock = NULL;

	sk->sk_user_data = NULL;
	sk->sk_data_ready = xprt->old_data_ready;
	sk->sk_state_change = xprt->old_state_change;
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_delayed_work(&xprt->connect_worker);
	flush_scheduled_work();

	xprt_disconnect(xprt);
	xs_close(xprt);
	kfree(xprt->slot);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid, *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	dprintk("RPC: %4d received reply\n", task->tk_pid);

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
		goto out_unlock;

	/* Something worked... */
	dst_confirm(skb->dst);

	xprt_complete_rqst(xprt, rovr, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock(&sk->sk_callback_lock);
}

static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
		dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
				len, desc->count);
		return 0;
	}
	desc->offset += len;
	desc->count -= len;
	dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
			len, desc->count);
	return len;
}

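/* The 4-byte record marker may itself arrive split across several
 * reads, so it is accumulated in xprt->tcp_recm until complete. */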
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
	len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;

	xprt->tcp_reclen = ntohl(xprt->tcp_recm);
	if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		xprt->tcp_flags |= XPRT_LAST_FRAG;
	else
		xprt->tcp_flags &= ~XPRT_LAST_FRAG;
	xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	xprt->tcp_flags &= ~XPRT_COPY_RECM;
	xprt->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(xprt->tcp_reclen < 4)) {
		dprintk("RPC: invalid TCP record fragment length\n");
		xprt_disconnect(xprt);
		return;
	}
	dprintk("RPC: reading TCP record fragment of length %d\n",
			xprt->tcp_reclen);
}

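/* Once the current fragment is consumed, re-arm the state machine to
 * read the next record marker; if this was the last fragment of the
 * record, the bytes after the next marker start with a fresh XID. */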
static void xs_tcp_check_recm(struct rpc_xprt *xprt)
{
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags);
	if (xprt->tcp_offset == xprt->tcp_reclen) {
		xprt->tcp_flags |= XPRT_COPY_RECM;
		xprt->tcp_offset = 0;
		if (xprt->tcp_flags & XPRT_LAST_FRAG) {
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
			xprt->tcp_flags |= XPRT_COPY_XID;
			xprt->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
	dprintk("RPC: reading XID (%Zu bytes)\n", len);
	p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
	used = xs_tcp_copy_data(desc, p, len);
	xprt->tcp_offset += used;
	if (used != len)
		return;
	xprt->tcp_flags &= ~XPRT_COPY_XID;
	xprt->tcp_flags |= XPRT_COPY_DATA;
	xprt->tcp_copied = 4;
	dprintk("RPC: reading reply for XID %08x\n",
			ntohl(xprt->tcp_xid));
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	struct rpc_rqst *req;
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
	if (!req) {
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x request not found!\n",
				ntohl(xprt->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return;
	}

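	/* Copy no more than the bytes remaining in the current record
	 * fragment; anything beyond that belongs to the next record
	 * marker and must be left for the state machine. */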
	rcvbuf = &req->rq_private_buf;
	len = desc->count;
	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
		skb_reader_t my_desc;

		len = xprt->tcp_reclen - xprt->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  &my_desc, xs_tcp_copy_data);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied,
					  desc, xs_tcp_copy_data);

	if (r > 0) {
		xprt->tcp_copied += r;
		xprt->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off XPRT_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
		dprintk("RPC: XID %08x truncated request\n",
				ntohl(xprt->tcp_xid));
		dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
				xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);
		goto out;
	}

	dprintk("RPC: XID %08x read %Zd bytes\n",
			ntohl(xprt->tcp_xid), r);
	dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
			xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen);

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
			xprt->tcp_flags &= ~XPRT_COPY_DATA;
	}

out:
	if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
		dprintk("RPC: %4d received reply complete\n",
				req->rq_task->tk_pid);
		xprt_complete_rqst(xprt, req, xprt->tcp_copied);
	}
	spin_unlock(&xprt->transport_lock);
	xs_tcp_check_recm(xprt);
}

static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{
	size_t len;

	len = xprt->tcp_reclen - xprt->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	xprt->tcp_offset += len;
	dprintk("RPC: discarded %Zu bytes\n", len);
	xs_tcp_check_recm(xprt);
}

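/* Feed data from a TCP segment through the receive state machine:
 * record marker, then XID, then reply data, discarding whatever is
 * left of a record once the reply buffer has been filled. */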
static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	skb_reader_t desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
		.csum	= 0
	};

	dprintk("RPC: xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (xprt->tcp_flags & XPRT_COPY_RECM) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (xprt->tcp_flags & XPRT_COPY_XID) {
			xs_tcp_read_xid(xprt, &desc);
			continue;
		}
		/* Read in the request data */
		if (xprt->tcp_flags & XPRT_COPY_DATA) {
			xs_tcp_read_request(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(xprt, &desc);
	} while (desc.count);
	dprintk("RPC: xs_tcp_data_recv done\n");
	return len - desc.count;
}

/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC: xs_tcp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	rd_desc.count = 65536;
	tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			/* Reset TCP record info */
			xprt->tcp_offset = 0;
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			xprt_wake_pending_tasks(xprt, 0);
		}
		spin_unlock_bh(&xprt->transport_lock);
		break;
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		break;
	default:
		xprt_disconnect(xprt);
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
		struct socket *sock;
		struct rpc_xprt *xprt;

		if (unlikely(!(sock = sk->sk_socket)))
			goto out;
		if (unlikely(!(xprt = xprt_from_sock(sk))))
			goto out;
		if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)))
			goto out;

		xprt_write_space(xprt);
	}

out:
	read_unlock(&sk->sk_callback_lock);
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Set socket send and receive limits based on the
 * sndsize and rcvsize fields in the generic transport
 * structure.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock *sk = xprt->inet;

	if (xprt->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2;
	}
	if (xprt->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_tcp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 *
 * Nothing to do for TCP.
 */
static void xs_tcp_set_buffer_size(struct rpc_xprt *xprt)
{
	return;
}

static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	int err, port;

	/* Were we already bound to a given port? Try to reuse it */
	port = xprt->port;
	do {
		myaddr.sin_port = htons(port);
		err = sock->ops->bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (err == 0) {
			xprt->port = port;
			dprintk("RPC: xs_bindresvport bound to port %u\n",
					port);
			return 0;
		}
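		/* Search downward through the reserved ports, wrapping
		 * around to the top of the range, and give up once every
		 * port has been tried (we are back at the starting port). */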
		if (--port == 0)
			port = XS_MAX_RESVPORT;
	} while (err == -EADDRINUSE && port != xprt->port);

	dprintk("RPC: can't bind to reserved port (%d).\n", -err);
	return err;
}

/**
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_udp_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *) args;
	struct socket *sock = xprt->sock;
	int err, status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt);

	/* Start by resetting any existing state */
	xs_close(xprt);

	if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
		dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
		goto out;
	}

	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
		sock_release(sock);
		goto out;
	}

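	/* Save the socket's default callbacks and install the transport's
	 * own, so it is notified of incoming data and write space. */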
	if (!xprt->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_no_check = UDP_CSUM_NORCV;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		xprt->sock = sock;
		xprt->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_set_buffer_size(xprt);
	status = 0;
out:
	xprt_wake_pending_tasks(xprt, status);
	xprt_clear_connecting(xprt);
}

/**
 * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_tcp_connect_worker(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;
	struct socket *sock = xprt->sock;
	int err, status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt);

	/* Start by resetting any existing socket state */
	xs_close(xprt);

	if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
		dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
		goto out;
	}

	if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
		sock_release(sock);
		goto out;
	}

	if (!xprt->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		sk->sk_user_data = xprt;
		xprt->old_data_ready = sk->sk_data_ready;
		xprt->old_state_change = sk->sk_state_change;
		xprt->old_write_space = sk->sk_write_space;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
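		/* Disable the Nagle algorithm, presumably so that small
		 * RPC sends are not delayed waiting for more data. */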
		tcp_sk(sk)->nonagle = 1;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		xprt->sock = sock;
		xprt->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	/* Tell the socket layer to start connecting... */
	status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
		case -EINPROGRESS:
		case -EALREADY:
			goto out_clear;
		}
	}
out:
	xprt_wake_pending_tasks(xprt, status);
out_clear:
	xprt_clear_connecting(xprt);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt_test_and_set_connecting(xprt))
		return;

	if (xprt->sock != NULL) {
		dprintk("RPC: xs_connect delayed xprt %p\n", xprt);
		schedule_delayed_work(&xprt->connect_worker,
				RPC_REESTABLISH_TIMEOUT);
	} else {
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
		schedule_work(&xprt->connect_worker);

		/* flush_scheduled_work can sleep... */
		if (!RPC_IS_ASYNC(task))
			flush_scheduled_work();
	}
}

static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.connect		= xs_connect,
	.send_request		= xs_udp_send_request,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

static struct rpc_xprt_ops xs_tcp_ops = {
	.set_buffer_size	= xs_tcp_set_buffer_size,
	.connect		= xs_connect,
	.send_request		= xs_tcp_send_request,
	.close			= xs_close,
	.destroy		= xs_destroy,
};

extern unsigned int xprt_udp_slot_table_entries;
extern unsigned int xprt_tcp_slot_table_entries;

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @xprt: transport to set up
 * @to:   timeout parameters
 *
 */
int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC: setting up udp-ipv4 transport...\n");

	xprt->max_reqs = xprt_udp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_UDP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->tsh_size = 0;
	xprt->nocong = 0;
	xprt->cwnd = RPC_INITCWND;
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt);

	xprt->ops = &xs_udp_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 5, 5 * HZ);

	return 0;
}

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @xprt: transport to set up
 * @to:   timeout parameters
 *
 */
int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
{
	size_t slot_table_size;

	dprintk("RPC: setting up tcp-ipv4 transport...\n");

	xprt->max_reqs = xprt_tcp_slot_table_entries;
	slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]);
	xprt->slot = kmalloc(slot_table_size, GFP_KERNEL);
	if (xprt->slot == NULL)
		return -ENOMEM;
	memset(xprt->slot, 0, slot_table_size);

	xprt->prot = IPPROTO_TCP;
	xprt->port = XS_MAX_RESVPORT;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->nocong = 1;
	xprt->cwnd = RPC_MAXCWND(xprt);
	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt);

	xprt->ops = &xs_tcp_ops;

	if (to)
		xprt->timeout = *to;
	else
		xprt_set_timeout(&xprt->timeout, 2, 60 * HZ);

	return 0;
}