Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/net/sunrpc/svcsock.c | |
3 | * | |
4 | * These are the RPC server socket internals. | |
5 | * | |
6 | * The server scheduling algorithm does not always distribute the load | |
7 | * evenly when servicing a single client. May need to modify the | |
8 | * svc_sock_enqueue procedure... | |
9 | * | |
10 | * TCP support is largely untested and may be a little slow. The problem | |
11 | * is that we currently do two separate recvfrom's, one for the 4-byte | |
12 | * record length, and the second for the actual record. This could possibly | |
13 | * be improved by always reading a minimum size of around 100 bytes and | |
14 | * tucking any superfluous bytes away in a temporary store. Still, that | |
15 | * leaves write requests out in the rain. An alternative may be to peek at | |
16 | * the first skb in the queue, and if it matches the next TCP sequence | |
17 | * number, to extract the record marker. Yuck. | |
18 | * | |
19 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | |
20 | */ | |
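The record marking referred to above is the standard RPC-over-TCP framing: each record is preceded by a 4-byte header whose top bit marks the final fragment and whose low 31 bits give the fragment length; svc_tcp_recvfrom() below reads exactly this header before the record body. A minimal, self-contained sketch of decoding it (the helper name is illustrative, not part of this file):

```c
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

/* Decode one RPC record-marking header: returns the fragment length
 * and sets *last when the top (last-fragment) bit is set.  The four
 * header bytes arrive in network byte order, as on the wire.
 */
static uint32_t rpc_record_marker(const unsigned char hdr[4], int *last)
{
	uint32_t marker;

	memcpy(&marker, hdr, sizeof(marker));
	marker = ntohl(marker);
	*last = (marker & 0x80000000u) != 0;
	return marker & 0x7fffffffu;
}
```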
21 | ||
22 | #include <linux/sched.h> | |
23 | #include <linux/errno.h> | |
24 | #include <linux/fcntl.h> | |
25 | #include <linux/net.h> | |
26 | #include <linux/in.h> | |
27 | #include <linux/inet.h> | |
28 | #include <linux/udp.h> | |
91483c4b | 29 | #include <linux/tcp.h> |
1da177e4 LT |
30 | #include <linux/unistd.h> |
31 | #include <linux/slab.h> | |
32 | #include <linux/netdevice.h> | |
33 | #include <linux/skbuff.h> | |
b41b66d6 | 34 | #include <linux/file.h> |
1da177e4 LT |
35 | #include <net/sock.h> |
36 | #include <net/checksum.h> | |
37 | #include <net/ip.h> | |
c752f073 | 38 | #include <net/tcp_states.h> |
1da177e4 LT |
39 | #include <asm/uaccess.h> |
40 | #include <asm/ioctls.h> | |
41 | ||
42 | #include <linux/sunrpc/types.h> | |
43 | #include <linux/sunrpc/xdr.h> | |
44 | #include <linux/sunrpc/svcsock.h> | |
45 | #include <linux/sunrpc/stats.h> | |
46 | ||
47 | /* SMP locking strategy: | |
48 | * | |
49 | * svc_serv->sv_lock protects most stuff for that service. | |
50 | * | |
51 | * Some flags can be set to certain values at any time | |
52 | * providing that certain rules are followed: | |
53 | * | |
54 | * SK_BUSY can be set to 0 at any time. | |
55 | * svc_sock_enqueue must be called afterwards | |
56 | * SK_CONN, SK_DATA, can be set or cleared at any time. | |
57 | * after a set, svc_sock_enqueue must be called. | |
58 | * after a clear, the socket must be read/accepted | |
59 | * if this succeeds, it must be set again. | |
60 | * SK_CLOSE can be set at any time. It is never cleared. | |
61 | * | |
62 | */ | |
63 | ||
64 | #define RPCDBG_FACILITY RPCDBG_SVCSOCK | |
65 | ||
66 | ||
67 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, | |
68 | int *errp, int pmap_reg); | |
69 | static void svc_udp_data_ready(struct sock *, int); | |
70 | static int svc_udp_recvfrom(struct svc_rqst *); | |
71 | static int svc_udp_sendto(struct svc_rqst *); | |
72 | ||
73 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); | |
74 | static int svc_deferred_recv(struct svc_rqst *rqstp); | |
75 | static struct cache_deferred_req *svc_defer(struct cache_req *req); | |
76 | ||
77 | /* | |
78 | * Queue up an idle server thread. Must have serv->sv_lock held. | |
79 | * Note: this is really a stack rather than a queue, so that we only | |
80 | * use as many different threads as we need, and the rest don't pollute | |
81 | * the cache. | |
82 | */ | |
83 | static inline void | |
84 | svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp) | |
85 | { | |
86 | list_add(&rqstp->rq_list, &serv->sv_threads); | |
87 | } | |
88 | ||
89 | /* | |
90 | * Dequeue an nfsd thread. Must have serv->sv_lock held. | |
91 | */ | |
92 | static inline void | |
93 | svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp) | |
94 | { | |
95 | list_del(&rqstp->rq_list); | |
96 | } | |
97 | ||
98 | /* | |
99 | * Release an skbuff after use | |
100 | */ | |
101 | static inline void | |
102 | svc_release_skb(struct svc_rqst *rqstp) | |
103 | { | |
104 | struct sk_buff *skb = rqstp->rq_skbuff; | |
105 | struct svc_deferred_req *dr = rqstp->rq_deferred; | |
106 | ||
107 | if (skb) { | |
108 | rqstp->rq_skbuff = NULL; | |
109 | ||
110 | dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); | |
111 | skb_free_datagram(rqstp->rq_sock->sk_sk, skb); | |
112 | } | |
113 | if (dr) { | |
114 | rqstp->rq_deferred = NULL; | |
115 | kfree(dr); | |
116 | } | |
117 | } | |
118 | ||
119 | /* | |
120 | * Any space to write? | |
121 | */ | |
122 | static inline unsigned long | |
123 | svc_sock_wspace(struct svc_sock *svsk) | |
124 | { | |
125 | int wspace; | |
126 | ||
127 | if (svsk->sk_sock->type == SOCK_STREAM) | |
128 | wspace = sk_stream_wspace(svsk->sk_sk); | |
129 | else | |
130 | wspace = sock_wspace(svsk->sk_sk); | |
131 | ||
132 | return wspace; | |
133 | } | |
134 | ||
135 | /* | |
136 | * Queue up a socket with data pending. If there are idle nfsd | |
137 | * processes, wake 'em up. | |
138 | * | |
139 | */ | |
140 | static void | |
141 | svc_sock_enqueue(struct svc_sock *svsk) | |
142 | { | |
143 | struct svc_serv *serv = svsk->sk_server; | |
144 | struct svc_rqst *rqstp; | |
145 | ||
146 | if (!(svsk->sk_flags & | |
147 | ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) )) | |
148 | return; | |
149 | if (test_bit(SK_DEAD, &svsk->sk_flags)) | |
150 | return; | |
151 | ||
152 | spin_lock_bh(&serv->sv_lock); | |
153 | ||
154 | if (!list_empty(&serv->sv_threads) && | |
155 | !list_empty(&serv->sv_sockets)) | |
156 | printk(KERN_ERR | |
157 | "svc_sock_enqueue: threads and sockets both waiting??\n"); | |
158 | ||
159 | if (test_bit(SK_DEAD, &svsk->sk_flags)) { | |
160 | /* Don't enqueue dead sockets */ | |
161 | dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk); | |
162 | goto out_unlock; | |
163 | } | |
164 | ||
165 | if (test_bit(SK_BUSY, &svsk->sk_flags)) { | |
166 | /* Don't enqueue socket while daemon is receiving */ | |
167 | dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk); | |
168 | goto out_unlock; | |
169 | } | |
170 | ||
171 | set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | |
172 | if (((svsk->sk_reserved + serv->sv_bufsz)*2 | |
173 | > svc_sock_wspace(svsk)) | |
174 | && !test_bit(SK_CLOSE, &svsk->sk_flags) | |
175 | && !test_bit(SK_CONN, &svsk->sk_flags)) { | |
176 | /* Don't enqueue while not enough space for reply */ | |
177 | dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n", | |
178 | svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz, | |
179 | svc_sock_wspace(svsk)); | |
180 | goto out_unlock; | |
181 | } | |
182 | clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | |
183 | ||
184 | /* Mark socket as busy. It will remain in this state until the | |
185 | * server has processed all pending data and put the socket back | |
186 | * on the idle list. | |
187 | */ | |
188 | set_bit(SK_BUSY, &svsk->sk_flags); | |
189 | ||
190 | if (!list_empty(&serv->sv_threads)) { | |
191 | rqstp = list_entry(serv->sv_threads.next, | |
192 | struct svc_rqst, | |
193 | rq_list); | |
194 | dprintk("svc: socket %p served by daemon %p\n", | |
195 | svsk->sk_sk, rqstp); | |
196 | svc_serv_dequeue(serv, rqstp); | |
197 | if (rqstp->rq_sock) | |
198 | printk(KERN_ERR | |
199 | "svc_sock_enqueue: server %p, rq_sock=%p!\n", | |
200 | rqstp, rqstp->rq_sock); | |
201 | rqstp->rq_sock = svsk; | |
202 | svsk->sk_inuse++; | |
203 | rqstp->rq_reserved = serv->sv_bufsz; | |
204 | svsk->sk_reserved += rqstp->rq_reserved; | |
205 | wake_up(&rqstp->rq_wait); | |
206 | } else { | |
207 | dprintk("svc: socket %p put into queue\n", svsk->sk_sk); | |
208 | list_add_tail(&svsk->sk_ready, &serv->sv_sockets); | |
209 | } | |
210 | ||
211 | out_unlock: | |
212 | spin_unlock_bh(&serv->sv_lock); | |
213 | } | |
214 | ||
215 | /* | |
216 | * Dequeue the first socket. Must be called with the serv->sv_lock held. | |
217 | */ | |
218 | static inline struct svc_sock * | |
219 | svc_sock_dequeue(struct svc_serv *serv) | |
220 | { | |
221 | struct svc_sock *svsk; | |
222 | ||
223 | if (list_empty(&serv->sv_sockets)) | |
224 | return NULL; | |
225 | ||
226 | svsk = list_entry(serv->sv_sockets.next, | |
227 | struct svc_sock, sk_ready); | |
228 | list_del_init(&svsk->sk_ready); | |
229 | ||
230 | dprintk("svc: socket %p dequeued, inuse=%d\n", | |
231 | svsk->sk_sk, svsk->sk_inuse); | |
232 | ||
233 | return svsk; | |
234 | } | |
235 | ||
236 | /* | |
237 | * Having read something from a socket, check whether it | |
238 | * needs to be re-enqueued. | |
239 | * Note: SK_DATA only gets cleared when a read-attempt finds | |
240 | * no (or insufficient) data. | |
241 | */ | |
242 | static inline void | |
243 | svc_sock_received(struct svc_sock *svsk) | |
244 | { | |
245 | clear_bit(SK_BUSY, &svsk->sk_flags); | |
246 | svc_sock_enqueue(svsk); | |
247 | } | |
248 | ||
249 | ||
250 | /** | |
251 | * svc_reserve - change the space reserved for the reply to a request. | |
252 | * @rqstp: The request in question | |
253 | * @space: new max space to reserve | |
254 | * | |
255 | * Each request reserves some space on the output queue of the socket | |
256 | * to make sure the reply fits. This function reduces that reserved | |
257 | * space to be the amount of space used already, plus @space. | |
258 | * | |
259 | */ | |
260 | void svc_reserve(struct svc_rqst *rqstp, int space) | |
261 | { | |
262 | space += rqstp->rq_res.head[0].iov_len; | |
263 | ||
264 | if (space < rqstp->rq_reserved) { | |
265 | struct svc_sock *svsk = rqstp->rq_sock; | |
266 | spin_lock_bh(&svsk->sk_server->sv_lock); | |
267 | svsk->sk_reserved -= (rqstp->rq_reserved - space); | |
268 | rqstp->rq_reserved = space; | |
269 | spin_unlock_bh(&svsk->sk_server->sv_lock); | |
270 | ||
271 | svc_sock_enqueue(svsk); | |
272 | } | |
273 | } | |
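A hedged usage sketch: a service handler that knows early on that its reply will be small can call svc_reserve() to give back most of the reservation, so svc_sock_enqueue()'s write-space check sees a smaller sk_reserved and other requests on the same socket are less likely to be throttled. The helper name and the 512-byte figure below are illustrative only:

```c
/* Illustrative only: once the handler knows the reply is tiny
 * (say a status-only reply), shrink the reserved space so other
 * requests on this socket are not held back unnecessarily.
 */
static void example_trim_reservation(struct svc_rqst *rqstp)
{
	svc_reserve(rqstp, 512);
}
```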
274 | ||
275 | /* | |
276 | * Release a socket after use. | |
277 | */ | |
278 | static inline void | |
279 | svc_sock_put(struct svc_sock *svsk) | |
280 | { | |
281 | struct svc_serv *serv = svsk->sk_server; | |
282 | ||
283 | spin_lock_bh(&serv->sv_lock); | |
284 | if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) { | |
285 | spin_unlock_bh(&serv->sv_lock); | |
286 | dprintk("svc: releasing dead socket\n"); | |
287 | sock_release(svsk->sk_sock); | |
288 | kfree(svsk); | |
289 | } | |
290 | else | |
291 | spin_unlock_bh(&serv->sv_lock); | |
292 | } | |
293 | ||
294 | static void | |
295 | svc_sock_release(struct svc_rqst *rqstp) | |
296 | { | |
297 | struct svc_sock *svsk = rqstp->rq_sock; | |
298 | ||
299 | svc_release_skb(rqstp); | |
300 | ||
301 | svc_free_allpages(rqstp); | |
302 | rqstp->rq_res.page_len = 0; | |
303 | rqstp->rq_res.page_base = 0; | |
304 | ||
305 | ||
306 | /* Reset response buffer and release | |
307 | * the reservation. | |
308 | * But first, check that enough space was reserved | |
309 | * for the reply, otherwise we have a bug! | |
310 | */ | |
311 | if ((rqstp->rq_res.len) > rqstp->rq_reserved) | |
312 | printk(KERN_ERR "RPC request reserved %d but used %d\n", | |
313 | rqstp->rq_reserved, | |
314 | rqstp->rq_res.len); | |
315 | ||
316 | rqstp->rq_res.head[0].iov_len = 0; | |
317 | svc_reserve(rqstp, 0); | |
318 | rqstp->rq_sock = NULL; | |
319 | ||
320 | svc_sock_put(svsk); | |
321 | } | |
322 | ||
323 | /* | |
324 | * External function to wake up a server waiting for data | |
325 | */ | |
326 | void | |
327 | svc_wake_up(struct svc_serv *serv) | |
328 | { | |
329 | struct svc_rqst *rqstp; | |
330 | ||
331 | spin_lock_bh(&serv->sv_lock); | |
332 | if (!list_empty(&serv->sv_threads)) { | |
333 | rqstp = list_entry(serv->sv_threads.next, | |
334 | struct svc_rqst, | |
335 | rq_list); | |
336 | dprintk("svc: daemon %p woken up.\n", rqstp); | |
337 | /* | |
338 | svc_serv_dequeue(serv, rqstp); | |
339 | rqstp->rq_sock = NULL; | |
340 | */ | |
341 | wake_up(&rqstp->rq_wait); | |
342 | } | |
343 | spin_unlock_bh(&serv->sv_lock); | |
344 | } | |
345 | ||
346 | /* | |
347 | * Generic sendto routine | |
348 | */ | |
349 | static int | |
350 | svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) | |
351 | { | |
352 | struct svc_sock *svsk = rqstp->rq_sock; | |
353 | struct socket *sock = svsk->sk_sock; | |
354 | int slen; | |
355 | char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))]; | |
356 | struct cmsghdr *cmh = (struct cmsghdr *)buffer; | |
357 | struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh); | |
358 | int len = 0; | |
359 | int result; | |
360 | int size; | |
361 | struct page **ppage = xdr->pages; | |
362 | size_t base = xdr->page_base; | |
363 | unsigned int pglen = xdr->page_len; | |
364 | unsigned int flags = MSG_MORE; | |
365 | ||
366 | slen = xdr->len; | |
367 | ||
368 | if (rqstp->rq_prot == IPPROTO_UDP) { | |
369 | /* set the source and destination */ | |
370 | struct msghdr msg; | |
371 | msg.msg_name = &rqstp->rq_addr; | |
372 | msg.msg_namelen = sizeof(rqstp->rq_addr); | |
373 | msg.msg_iov = NULL; | |
374 | msg.msg_iovlen = 0; | |
375 | msg.msg_flags = MSG_MORE; | |
376 | ||
377 | msg.msg_control = cmh; | |
378 | msg.msg_controllen = sizeof(buffer); | |
379 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); | |
380 | cmh->cmsg_level = SOL_IP; | |
381 | cmh->cmsg_type = IP_PKTINFO; | |
382 | pki->ipi_ifindex = 0; | |
383 | pki->ipi_spec_dst.s_addr = rqstp->rq_daddr; | |
384 | ||
385 | if (sock_sendmsg(sock, &msg, 0) < 0) | |
386 | goto out; | |
387 | } | |
388 | ||
389 | /* send head */ | |
390 | if (slen == xdr->head[0].iov_len) | |
391 | flags = 0; | |
e6242e92 | 392 | len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags); |
1da177e4 LT |
393 | if (len != xdr->head[0].iov_len) |
394 | goto out; | |
395 | slen -= xdr->head[0].iov_len; | |
396 | if (slen == 0) | |
397 | goto out; | |
398 | ||
399 | /* send page data */ | |
400 | size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; | |
401 | while (pglen > 0) { | |
402 | if (slen == size) | |
403 | flags = 0; | |
e6242e92 | 404 | result = kernel_sendpage(sock, *ppage, base, size, flags); |
1da177e4 LT |
405 | if (result > 0) |
406 | len += result; | |
407 | if (result != size) | |
408 | goto out; | |
409 | slen -= size; | |
410 | pglen -= size; | |
411 | size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen; | |
412 | base = 0; | |
413 | ppage++; | |
414 | } | |
415 | /* send tail */ | |
416 | if (xdr->tail[0].iov_len) { | |
e6242e92 | 417 | result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage], |
1da177e4 LT |
418 | ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1), |
419 | xdr->tail[0].iov_len, 0); | |
420 | ||
421 | if (result > 0) | |
422 | len += result; | |
423 | } | |
424 | out: | |
425 | dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n", | |
426 | rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len, | |
427 | rqstp->rq_addr.sin_addr.s_addr); | |
428 | ||
429 | return len; | |
430 | } | |
431 | ||
80212d59 N |
432 | /* |
433 | * Report socket names for nfsdfs | |
434 | */ | |
435 | static int one_sock_name(char *buf, struct svc_sock *svsk) | |
436 | { | |
437 | int len; | |
438 | ||
439 | switch(svsk->sk_sk->sk_family) { | |
440 | case AF_INET: | |
441 | len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n", | |
442 | svsk->sk_sk->sk_protocol==IPPROTO_UDP? | |
443 | "udp" : "tcp", | |
444 | NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr), | |
445 | inet_sk(svsk->sk_sk)->num); | |
446 | break; | |
447 | default: | |
448 | len = sprintf(buf, "*unknown-%d*\n", | |
449 | svsk->sk_sk->sk_family); | |
450 | } | |
451 | return len; | |
452 | } | |
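For reference, the line produced for a UDP socket bound to all addresses on port 2049 would read `ipv4 udp 0.0.0.0 2049` (illustrative values); unknown address families fall back to the `*unknown-N*` form.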
453 | ||
454 | int | |
b41b66d6 | 455 | svc_sock_names(char *buf, struct svc_serv *serv, char *toclose) |
80212d59 | 456 | { |
b41b66d6 | 457 | struct svc_sock *svsk, *closesk = NULL; |
80212d59 N |
458 | int len = 0; |
459 | ||
460 | if (!serv) | |
461 | return 0; | |
462 | spin_lock(&serv->sv_lock); | |
463 | list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) { | |
464 | int onelen = one_sock_name(buf+len, svsk); | |
b41b66d6 N |
465 | if (toclose && strcmp(toclose, buf+len) == 0) |
466 | closesk = svsk; | |
467 | else | |
468 | len += onelen; | |
80212d59 N |
469 | } |
470 | spin_unlock(&serv->sv_lock); | |
b41b66d6 N |
471 | if (closesk) |
472 | svc_delete_socket(closesk); | |
80212d59 N |
473 | return len; |
474 | } | |
475 | EXPORT_SYMBOL(svc_sock_names); | |
476 | ||
1da177e4 LT |
477 | /* |
478 | * Check input queue length | |
479 | */ | |
480 | static int | |
481 | svc_recv_available(struct svc_sock *svsk) | |
482 | { | |
1da177e4 LT |
483 | struct socket *sock = svsk->sk_sock; |
484 | int avail, err; | |
485 | ||
e6242e92 | 486 | err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail); |
1da177e4 LT |
487 | |
488 | return (err >= 0)? avail : err; | |
489 | } | |
490 | ||
491 | /* | |
492 | * Generic recvfrom routine. | |
493 | */ | |
494 | static int | |
495 | svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) | |
496 | { | |
497 | struct msghdr msg; | |
498 | struct socket *sock; | |
499 | int len, alen; | |
500 | ||
501 | rqstp->rq_addrlen = sizeof(rqstp->rq_addr); | |
502 | sock = rqstp->rq_sock->sk_sock; | |
503 | ||
504 | msg.msg_name = &rqstp->rq_addr; | |
505 | msg.msg_namelen = sizeof(rqstp->rq_addr); | |
506 | msg.msg_control = NULL; | |
507 | msg.msg_controllen = 0; | |
508 | ||
509 | msg.msg_flags = MSG_DONTWAIT; | |
510 | ||
511 | len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT); | |
512 | ||
513 | /* sock_recvmsg doesn't fill in the name/namelen, so we must.. | |
514 | * possibly we should cache this in the svc_sock structure | |
515 | * at accept time. FIXME | |
516 | */ | |
517 | alen = sizeof(rqstp->rq_addr); | |
e6242e92 | 518 | kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen); |
1da177e4 LT |
519 | |
520 | dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", | |
521 | rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len); | |
522 | ||
523 | return len; | |
524 | } | |
525 | ||
526 | /* | |
527 | * Set socket snd and rcv buffer lengths | |
528 | */ | |
529 | static inline void | |
530 | svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) | |
531 | { | |
532 | #if 0 | |
533 | mm_segment_t oldfs; | |
534 | oldfs = get_fs(); set_fs(KERNEL_DS); | |
535 | sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, | |
536 | (char*)&snd, sizeof(snd)); | |
537 | sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, | |
538 | (char*)&rcv, sizeof(rcv)); | |
539 | #else | |
540 | /* sock_setsockopt limits use to sysctl_?mem_max, | |
541 | * which isn't acceptable. Until that is made conditional | |
542 | * on not having CAP_SYS_RESOURCE or similar, we go direct... | |
543 | * DaveM said I could! | |
544 | */ | |
545 | lock_sock(sock->sk); | |
546 | sock->sk->sk_sndbuf = snd * 2; | |
547 | sock->sk->sk_rcvbuf = rcv * 2; | |
548 | sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; | |
549 | release_sock(sock->sk); | |
550 | #endif | |
551 | } | |
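For comparison, the conventional (capped) path that the comment above says is being bypassed is the ordinary SO_SNDBUF/SO_RCVBUF socket option, which the kernel clamps to the wmem/rmem sysctl maxima. A purely illustrative userspace sketch of that path:

```c
#include <sys/socket.h>

/* Userspace equivalent of the #if 0 branch above; requests are
 * silently clamped to the sysctl rmem/wmem maxima, which is exactly
 * the limitation svc_sock_setbufsize() works around for in-kernel
 * sockets by writing sk_sndbuf/sk_rcvbuf directly.
 */
static int set_bufsizes(int fd, int snd, int rcv)
{
	if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd)) < 0)
		return -1;
	return setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
}
```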
552 | /* | |
553 | * INET callback when data has been received on the socket. | |
554 | */ | |
555 | static void | |
556 | svc_udp_data_ready(struct sock *sk, int count) | |
557 | { | |
939bb7ef | 558 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 | 559 | |
939bb7ef NB |
560 | if (svsk) { |
561 | dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", | |
562 | svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags)); | |
563 | set_bit(SK_DATA, &svsk->sk_flags); | |
564 | svc_sock_enqueue(svsk); | |
565 | } | |
1da177e4 LT |
566 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
567 | wake_up_interruptible(sk->sk_sleep); | |
568 | } | |
569 | ||
570 | /* | |
571 | * INET callback when space is newly available on the socket. | |
572 | */ | |
573 | static void | |
574 | svc_write_space(struct sock *sk) | |
575 | { | |
576 | struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); | |
577 | ||
578 | if (svsk) { | |
579 | dprintk("svc: socket %p(inet %p), write_space busy=%d\n", | |
580 | svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags)); | |
581 | svc_sock_enqueue(svsk); | |
582 | } | |
583 | ||
584 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { | |
939bb7ef | 585 | dprintk("RPC svc_write_space: someone sleeping on %p\n", |
1da177e4 LT |
586 | svsk); |
587 | wake_up_interruptible(sk->sk_sleep); | |
588 | } | |
589 | } | |
590 | ||
591 | /* | |
592 | * Receive a datagram from a UDP socket. | |
593 | */ | |
1da177e4 LT |
594 | static int |
595 | svc_udp_recvfrom(struct svc_rqst *rqstp) | |
596 | { | |
597 | struct svc_sock *svsk = rqstp->rq_sock; | |
598 | struct svc_serv *serv = svsk->sk_server; | |
599 | struct sk_buff *skb; | |
600 | int err, len; | |
601 | ||
602 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) | |
603 | /* udp sockets need large rcvbuf as all pending | |
604 | * requests are still in that buffer. sndbuf must | |
605 | * also be large enough that there is enough space | |
606 | * for one reply per thread. | |
607 | */ | |
608 | svc_sock_setbufsize(svsk->sk_sock, | |
609 | (serv->sv_nrthreads+3) * serv->sv_bufsz, | |
610 | (serv->sv_nrthreads+3) * serv->sv_bufsz); | |
611 | ||
612 | if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) { | |
613 | svc_sock_received(svsk); | |
614 | return svc_deferred_recv(rqstp); | |
615 | } | |
616 | ||
617 | clear_bit(SK_DATA, &svsk->sk_flags); | |
618 | while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { | |
619 | if (err == -EAGAIN) { | |
620 | svc_sock_received(svsk); | |
621 | return err; | |
622 | } | |
623 | /* possibly an icmp error */ | |
624 | dprintk("svc: recvfrom returned error %d\n", -err); | |
625 | } | |
a61bbcf2 PM |
626 | if (skb->tstamp.off_sec == 0) { |
627 | struct timeval tv; | |
628 | ||
629 | tv.tv_sec = xtime.tv_sec; | |
4bcde03d | 630 | tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; |
a61bbcf2 | 631 | skb_set_timestamp(skb, &tv); |
1da177e4 LT |
632 | /* Don't enable netstamp, sunrpc doesn't |
633 | need that much accuracy */ | |
634 | } | |
a61bbcf2 | 635 | skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp); |
1da177e4 LT |
636 | set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */ |
637 | ||
638 | /* | |
639 | * Maybe more packets - kick another thread ASAP. | |
640 | */ | |
641 | svc_sock_received(svsk); | |
642 | ||
643 | len = skb->len - sizeof(struct udphdr); | |
644 | rqstp->rq_arg.len = len; | |
645 | ||
646 | rqstp->rq_prot = IPPROTO_UDP; | |
647 | ||
648 | /* Get sender address */ | |
649 | rqstp->rq_addr.sin_family = AF_INET; | |
650 | rqstp->rq_addr.sin_port = skb->h.uh->source; | |
651 | rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr; | |
652 | rqstp->rq_daddr = skb->nh.iph->daddr; | |
653 | ||
654 | if (skb_is_nonlinear(skb)) { | |
655 | /* we have to copy */ | |
656 | local_bh_disable(); | |
657 | if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { | |
658 | local_bh_enable(); | |
659 | /* checksum error */ | |
660 | skb_free_datagram(svsk->sk_sk, skb); | |
661 | return 0; | |
662 | } | |
663 | local_bh_enable(); | |
664 | skb_free_datagram(svsk->sk_sk, skb); | |
665 | } else { | |
666 | /* we can use it in-place */ | |
667 | rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); | |
668 | rqstp->rq_arg.head[0].iov_len = len; | |
fb286bb2 HX |
669 | if (skb_checksum_complete(skb)) { |
670 | skb_free_datagram(svsk->sk_sk, skb); | |
671 | return 0; | |
1da177e4 LT |
672 | } |
673 | rqstp->rq_skbuff = skb; | |
674 | } | |
675 | ||
676 | rqstp->rq_arg.page_base = 0; | |
677 | if (len <= rqstp->rq_arg.head[0].iov_len) { | |
678 | rqstp->rq_arg.head[0].iov_len = len; | |
679 | rqstp->rq_arg.page_len = 0; | |
680 | } else { | |
681 | rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; | |
682 | rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE; | |
683 | } | |
684 | ||
685 | if (serv->sv_stats) | |
686 | serv->sv_stats->netudpcnt++; | |
687 | ||
688 | return len; | |
689 | } | |
690 | ||
691 | static int | |
692 | svc_udp_sendto(struct svc_rqst *rqstp) | |
693 | { | |
694 | int error; | |
695 | ||
696 | error = svc_sendto(rqstp, &rqstp->rq_res); | |
697 | if (error == -ECONNREFUSED) | |
698 | /* ICMP error on earlier request. */ | |
699 | error = svc_sendto(rqstp, &rqstp->rq_res); | |
700 | ||
701 | return error; | |
702 | } | |
703 | ||
704 | static void | |
705 | svc_udp_init(struct svc_sock *svsk) | |
706 | { | |
707 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; | |
708 | svsk->sk_sk->sk_write_space = svc_write_space; | |
709 | svsk->sk_recvfrom = svc_udp_recvfrom; | |
710 | svsk->sk_sendto = svc_udp_sendto; | |
711 | ||
712 | /* initial setting must have enough space to | |
713 | * receive and respond to one request. | |
714 | * svc_udp_recvfrom will re-adjust if necessary | |
715 | */ | |
716 | svc_sock_setbufsize(svsk->sk_sock, | |
717 | 3 * svsk->sk_server->sv_bufsz, | |
718 | 3 * svsk->sk_server->sv_bufsz); | |
719 | ||
720 | set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ | |
721 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
722 | } | |
723 | ||
724 | /* | |
725 | * A data_ready event on a listening socket means there's a connection | |
726 | * pending. Do not use state_change as a substitute for it. | |
727 | */ | |
728 | static void | |
729 | svc_tcp_listen_data_ready(struct sock *sk, int count_unused) | |
730 | { | |
939bb7ef | 731 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 LT |
732 | |
733 | dprintk("svc: socket %p TCP (listen) state change %d\n", | |
939bb7ef | 734 | sk, sk->sk_state); |
1da177e4 | 735 | |
939bb7ef NB |
736 | /* |
737 | * This callback may be called twice when a new connection | |
738 | * is established as a child socket inherits everything | |
739 | * from a parent LISTEN socket. | |
740 | * 1) data_ready method of the parent socket will be called | |
741 | * when one of the child sockets becomes ESTABLISHED. | |
742 | * 2) data_ready method of the child socket may be called | |
743 | * when it receives data before the socket is accepted. | |
744 | * In case of 2, we should ignore it silently. | |
745 | */ | |
746 | if (sk->sk_state == TCP_LISTEN) { | |
747 | if (svsk) { | |
748 | set_bit(SK_CONN, &svsk->sk_flags); | |
749 | svc_sock_enqueue(svsk); | |
750 | } else | |
751 | printk("svc: socket %p: no user data\n", sk); | |
1da177e4 | 752 | } |
939bb7ef | 753 | |
1da177e4 LT |
754 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
755 | wake_up_interruptible_all(sk->sk_sleep); | |
756 | } | |
757 | ||
758 | /* | |
759 | * A state change on a connected socket means it's dying or dead. | |
760 | */ | |
761 | static void | |
762 | svc_tcp_state_change(struct sock *sk) | |
763 | { | |
939bb7ef | 764 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 LT |
765 | |
766 | dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n", | |
939bb7ef | 767 | sk, sk->sk_state, sk->sk_user_data); |
1da177e4 | 768 | |
939bb7ef | 769 | if (!svsk) |
1da177e4 | 770 | printk("svc: socket %p: no user data\n", sk); |
939bb7ef NB |
771 | else { |
772 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
773 | svc_sock_enqueue(svsk); | |
1da177e4 | 774 | } |
1da177e4 LT |
775 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
776 | wake_up_interruptible_all(sk->sk_sleep); | |
777 | } | |
778 | ||
779 | static void | |
780 | svc_tcp_data_ready(struct sock *sk, int count) | |
781 | { | |
939bb7ef | 782 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 LT |
783 | |
784 | dprintk("svc: socket %p TCP data ready (svsk %p)\n", | |
939bb7ef NB |
785 | sk, sk->sk_user_data); |
786 | if (svsk) { | |
787 | set_bit(SK_DATA, &svsk->sk_flags); | |
788 | svc_sock_enqueue(svsk); | |
789 | } | |
1da177e4 LT |
790 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
791 | wake_up_interruptible(sk->sk_sleep); | |
792 | } | |
793 | ||
794 | /* | |
795 | * Accept a TCP connection | |
796 | */ | |
797 | static void | |
798 | svc_tcp_accept(struct svc_sock *svsk) | |
799 | { | |
800 | struct sockaddr_in sin; | |
801 | struct svc_serv *serv = svsk->sk_server; | |
802 | struct socket *sock = svsk->sk_sock; | |
803 | struct socket *newsock; | |
1da177e4 LT |
804 | struct svc_sock *newsvsk; |
805 | int err, slen; | |
806 | ||
807 | dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); | |
808 | if (!sock) | |
809 | return; | |
810 | ||
e6242e92 SS |
811 | clear_bit(SK_CONN, &svsk->sk_flags); |
812 | err = kernel_accept(sock, &newsock, O_NONBLOCK); | |
813 | if (err < 0) { | |
1da177e4 LT |
814 | if (err == -ENOMEM) |
815 | printk(KERN_WARNING "%s: no more sockets!\n", | |
816 | serv->sv_name); | |
e6242e92 | 817 | else if (err != -EAGAIN && net_ratelimit()) |
1da177e4 LT |
818 | printk(KERN_WARNING "%s: accept failed (err %d)!\n", |
819 | serv->sv_name, -err); | |
e6242e92 | 820 | return; |
1da177e4 | 821 | } |
e6242e92 | 822 | |
1da177e4 LT |
823 | set_bit(SK_CONN, &svsk->sk_flags); |
824 | svc_sock_enqueue(svsk); | |
825 | ||
826 | slen = sizeof(sin); | |
e6242e92 | 827 | err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen); |
1da177e4 LT |
828 | if (err < 0) { |
829 | if (net_ratelimit()) | |
830 | printk(KERN_WARNING "%s: peername failed (err %d)!\n", | |
831 | serv->sv_name, -err); | |
832 | goto failed; /* aborted connection or whatever */ | |
833 | } | |
834 | ||
835 | /* Ideally, we would want to reject connections from unauthorized | |
836 | * hosts here, but when we get encryption, the IP of the host won't | |
837 | * tell us anything. For now just warn about unpriv connections. | |
838 | */ | |
839 | if (ntohs(sin.sin_port) >= 1024) { | |
840 | dprintk(KERN_WARNING | |
841 | "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n", | |
842 | serv->sv_name, | |
843 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | |
844 | } | |
845 | ||
846 | dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name, | |
847 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | |
848 | ||
849 | /* make sure that a write doesn't block forever when | |
850 | * low on memory | |
851 | */ | |
852 | newsock->sk->sk_sndtimeo = HZ*30; | |
853 | ||
854 | if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0))) | |
855 | goto failed; | |
856 | ||
857 | ||
858 | /* make sure that we don't have too many active connections. | |
859 | * If we have, something must be dropped. | |
860 | * | |
861 | * There's no point in trying to do random drop here for | |
862 | * DoS prevention. The NFS client does one reconnect every 15 | |
863 | * seconds. An attacker can easily beat that. | |
864 | * | |
865 | * The only somewhat efficient mechanism would be to drop | |
866 | * old connections from the same IP first. But right now | |
867 | * we don't even record the client IP in svc_sock. | |
868 | */ | |
869 | if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { | |
870 | struct svc_sock *svsk = NULL; | |
871 | spin_lock_bh(&serv->sv_lock); | |
872 | if (!list_empty(&serv->sv_tempsocks)) { | |
873 | if (net_ratelimit()) { | |
874 | /* Try to help the admin */ | |
875 | printk(KERN_NOTICE "%s: too many open TCP " | |
876 | "sockets, consider increasing the " | |
877 | "number of nfsd threads\n", | |
878 | serv->sv_name); | |
879 | printk(KERN_NOTICE "%s: last TCP connect from " | |
880 | "%u.%u.%u.%u:%d\n", | |
881 | serv->sv_name, | |
882 | NIPQUAD(sin.sin_addr.s_addr), | |
883 | ntohs(sin.sin_port)); | |
884 | } | |
885 | /* | |
886 | * Always select the oldest socket. It's not fair, | |
887 | * but so is life | |
888 | */ | |
889 | svsk = list_entry(serv->sv_tempsocks.prev, | |
890 | struct svc_sock, | |
891 | sk_list); | |
892 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
893 | svsk->sk_inuse ++; | |
894 | } | |
895 | spin_unlock_bh(&serv->sv_lock); | |
896 | ||
897 | if (svsk) { | |
898 | svc_sock_enqueue(svsk); | |
899 | svc_sock_put(svsk); | |
900 | } | |
901 | ||
902 | } | |
903 | ||
904 | if (serv->sv_stats) | |
905 | serv->sv_stats->nettcpconn++; | |
906 | ||
907 | return; | |
908 | ||
909 | failed: | |
910 | sock_release(newsock); | |
911 | return; | |
912 | } | |
913 | ||
914 | /* | |
915 | * Receive data from a TCP socket. | |
916 | */ | |
917 | static int | |
918 | svc_tcp_recvfrom(struct svc_rqst *rqstp) | |
919 | { | |
920 | struct svc_sock *svsk = rqstp->rq_sock; | |
921 | struct svc_serv *serv = svsk->sk_server; | |
922 | int len; | |
923 | struct kvec vec[RPCSVC_MAXPAGES]; | |
924 | int pnum, vlen; | |
925 | ||
926 | dprintk("svc: tcp_recv %p data %d conn %d close %d\n", | |
927 | svsk, test_bit(SK_DATA, &svsk->sk_flags), | |
928 | test_bit(SK_CONN, &svsk->sk_flags), | |
929 | test_bit(SK_CLOSE, &svsk->sk_flags)); | |
930 | ||
931 | if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) { | |
932 | svc_sock_received(svsk); | |
933 | return svc_deferred_recv(rqstp); | |
934 | } | |
935 | ||
936 | if (test_bit(SK_CLOSE, &svsk->sk_flags)) { | |
937 | svc_delete_socket(svsk); | |
938 | return 0; | |
939 | } | |
940 | ||
941 | if (test_bit(SK_CONN, &svsk->sk_flags)) { | |
942 | svc_tcp_accept(svsk); | |
943 | svc_sock_received(svsk); | |
944 | return 0; | |
945 | } | |
946 | ||
947 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) | |
948 | /* sndbuf needs to have room for one request | |
949 | * per thread, otherwise we can stall even when the | |
950 | * network isn't a bottleneck. | |
951 | * rcvbuf just needs to be able to hold a few requests. | |
952 | * Normally they will be removed from the queue | |
953 | * as soon as a complete request arrives. | |
954 | */ | |
955 | svc_sock_setbufsize(svsk->sk_sock, | |
956 | (serv->sv_nrthreads+3) * serv->sv_bufsz, | |
957 | 3 * serv->sv_bufsz); | |
958 | ||
959 | clear_bit(SK_DATA, &svsk->sk_flags); | |
960 | ||
961 | /* Receive data. If we haven't got the record length yet, get | |
962 | * the next four bytes. Otherwise try to gobble up as much as | |
963 | * possible up to the complete record length. | |
964 | */ | |
965 | if (svsk->sk_tcplen < 4) { | |
966 | unsigned long want = 4 - svsk->sk_tcplen; | |
967 | struct kvec iov; | |
968 | ||
969 | iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; | |
970 | iov.iov_len = want; | |
971 | if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) | |
972 | goto error; | |
973 | svsk->sk_tcplen += len; | |
974 | ||
975 | if (len < want) { | |
976 | dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", | |
977 | len, want); | |
978 | svc_sock_received(svsk); | |
979 | return -EAGAIN; /* record header not complete */ | |
980 | } | |
981 | ||
982 | svsk->sk_reclen = ntohl(svsk->sk_reclen); | |
983 | if (!(svsk->sk_reclen & 0x80000000)) { | |
984 | /* FIXME: technically, a record can be fragmented, | |
985 | * and non-terminal fragments will not have the top | |
986 | * bit set in the fragment length header. | |
987 | * But apparently no known nfs clients send fragmented | |
988 | * records. */ | |
989 | printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n", | |
990 | (unsigned long) svsk->sk_reclen); | |
991 | goto err_delete; | |
992 | } | |
993 | svsk->sk_reclen &= 0x7fffffff; | |
994 | dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); | |
995 | if (svsk->sk_reclen > serv->sv_bufsz) { | |
996 | printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n", | |
997 | (unsigned long) svsk->sk_reclen); | |
998 | goto err_delete; | |
999 | } | |
1000 | } | |
1001 | ||
1002 | /* Check whether enough data is available */ | |
1003 | len = svc_recv_available(svsk); | |
1004 | if (len < 0) | |
1005 | goto error; | |
1006 | ||
1007 | if (len < svsk->sk_reclen) { | |
1008 | dprintk("svc: incomplete TCP record (%d of %d)\n", | |
1009 | len, svsk->sk_reclen); | |
1010 | svc_sock_received(svsk); | |
1011 | return -EAGAIN; /* record not complete */ | |
1012 | } | |
1013 | len = svsk->sk_reclen; | |
1014 | set_bit(SK_DATA, &svsk->sk_flags); | |
1015 | ||
1016 | vec[0] = rqstp->rq_arg.head[0]; | |
1017 | vlen = PAGE_SIZE; | |
1018 | pnum = 1; | |
1019 | while (vlen < len) { | |
1020 | vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]); | |
1021 | vec[pnum].iov_len = PAGE_SIZE; | |
1022 | pnum++; | |
1023 | vlen += PAGE_SIZE; | |
1024 | } | |
1025 | ||
1026 | /* Now receive data */ | |
1027 | len = svc_recvfrom(rqstp, vec, pnum, len); | |
1028 | if (len < 0) | |
1029 | goto error; | |
1030 | ||
1031 | dprintk("svc: TCP complete record (%d bytes)\n", len); | |
1032 | rqstp->rq_arg.len = len; | |
1033 | rqstp->rq_arg.page_base = 0; | |
1034 | if (len <= rqstp->rq_arg.head[0].iov_len) { | |
1035 | rqstp->rq_arg.head[0].iov_len = len; | |
1036 | rqstp->rq_arg.page_len = 0; | |
1037 | } else { | |
1038 | rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; | |
1039 | } | |
1040 | ||
1041 | rqstp->rq_skbuff = NULL; | |
1042 | rqstp->rq_prot = IPPROTO_TCP; | |
1043 | ||
1044 | /* Reset TCP read info */ | |
1045 | svsk->sk_reclen = 0; | |
1046 | svsk->sk_tcplen = 0; | |
1047 | ||
1048 | svc_sock_received(svsk); | |
1049 | if (serv->sv_stats) | |
1050 | serv->sv_stats->nettcpcnt++; | |
1051 | ||
1052 | return len; | |
1053 | ||
1054 | err_delete: | |
1055 | svc_delete_socket(svsk); | |
1056 | return -EAGAIN; | |
1057 | ||
1058 | error: | |
1059 | if (len == -EAGAIN) { | |
1060 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | |
1061 | svc_sock_received(svsk); | |
1062 | } else { | |
1063 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", | |
1064 | svsk->sk_server->sv_name, -len); | |
93fbf1a5 | 1065 | goto err_delete; |
1da177e4 LT |
1066 | } |
1067 | ||
1068 | return len; | |
1069 | } | |
1070 | ||
1071 | /* | |
1072 | * Send out data on TCP socket. | |
1073 | */ | |
1074 | static int | |
1075 | svc_tcp_sendto(struct svc_rqst *rqstp) | |
1076 | { | |
1077 | struct xdr_buf *xbufp = &rqstp->rq_res; | |
1078 | int sent; | |
d8ed029d | 1079 | __be32 reclen; |
1da177e4 LT |
1080 | |
1081 | /* Set up the first element of the reply kvec. | |
1082 | * Any other kvecs that may be in use have been taken | |
1083 | * care of by the server implementation itself. | |
1084 | */ | |
1085 | reclen = htonl(0x80000000|((xbufp->len ) - 4)); | |
1086 | memcpy(xbufp->head[0].iov_base, &reclen, 4); | |
1087 | ||
1088 | if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags)) | |
1089 | return -ENOTCONN; | |
1090 | ||
1091 | sent = svc_sendto(rqstp, &rqstp->rq_res); | |
1092 | if (sent != xbufp->len) { | |
1093 | printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n", | |
1094 | rqstp->rq_sock->sk_server->sv_name, | |
1095 | (sent<0)?"got error":"sent only", | |
1096 | sent, xbufp->len); | |
1097 | svc_delete_socket(rqstp->rq_sock); | |
1098 | sent = -EAGAIN; | |
1099 | } | |
1100 | return sent; | |
1101 | } | |
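To make the record-marker arithmetic above concrete (illustrative numbers): for a reply whose total xdr_buf length is 132 bytes, of which the first 4 are the marker itself, the value written is `0x80000000 | (132 - 4) == 0x80000080`, i.e. last fragment, 128 bytes of RPC data follow.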
1102 | ||
1103 | static void | |
1104 | svc_tcp_init(struct svc_sock *svsk) | |
1105 | { | |
1106 | struct sock *sk = svsk->sk_sk; | |
1107 | struct tcp_sock *tp = tcp_sk(sk); | |
1108 | ||
1109 | svsk->sk_recvfrom = svc_tcp_recvfrom; | |
1110 | svsk->sk_sendto = svc_tcp_sendto; | |
1111 | ||
1112 | if (sk->sk_state == TCP_LISTEN) { | |
1113 | dprintk("setting up TCP socket for listening\n"); | |
1114 | sk->sk_data_ready = svc_tcp_listen_data_ready; | |
1115 | set_bit(SK_CONN, &svsk->sk_flags); | |
1116 | } else { | |
1117 | dprintk("setting up TCP socket for reading\n"); | |
1118 | sk->sk_state_change = svc_tcp_state_change; | |
1119 | sk->sk_data_ready = svc_tcp_data_ready; | |
1120 | sk->sk_write_space = svc_write_space; | |
1121 | ||
1122 | svsk->sk_reclen = 0; | |
1123 | svsk->sk_tcplen = 0; | |
1124 | ||
1125 | tp->nonagle = 1; /* disable Nagle's algorithm */ | |
1126 | ||
1127 | /* initial setting must have enough space to | |
1128 | * receive and respond to one request. | |
1129 | * svc_tcp_recvfrom will re-adjust if necessary | |
1130 | */ | |
1131 | svc_sock_setbufsize(svsk->sk_sock, | |
1132 | 3 * svsk->sk_server->sv_bufsz, | |
1133 | 3 * svsk->sk_server->sv_bufsz); | |
1134 | ||
1135 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
1136 | set_bit(SK_DATA, &svsk->sk_flags); | |
1137 | if (sk->sk_state != TCP_ESTABLISHED) | |
1138 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
1139 | } | |
1140 | } | |
1141 | ||
1142 | void | |
1143 | svc_sock_update_bufs(struct svc_serv *serv) | |
1144 | { | |
1145 | /* | |
1146 | * The number of server threads has changed. Update | |
1147 | * rcvbuf and sndbuf accordingly on all sockets | |
1148 | */ | |
1149 | struct list_head *le; | |
1150 | ||
1151 | spin_lock_bh(&serv->sv_lock); | |
1152 | list_for_each(le, &serv->sv_permsocks) { | |
1153 | struct svc_sock *svsk = | |
1154 | list_entry(le, struct svc_sock, sk_list); | |
1155 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
1156 | } | |
1157 | list_for_each(le, &serv->sv_tempsocks) { | |
1158 | struct svc_sock *svsk = | |
1159 | list_entry(le, struct svc_sock, sk_list); | |
1160 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
1161 | } | |
1162 | spin_unlock_bh(&serv->sv_lock); | |
1163 | } | |
1164 | ||
1165 | /* | |
1166 | * Receive the next request on any socket. | |
1167 | */ | |
1168 | int | |
1169 | svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout) | |
1170 | { | |
1171 | struct svc_sock *svsk = NULL; | |
1172 | int len; | |
1173 | int pages; | |
1174 | struct xdr_buf *arg; | |
1175 | DECLARE_WAITQUEUE(wait, current); | |
1176 | ||
1177 | dprintk("svc: server %p waiting for data (to = %ld)\n", | |
1178 | rqstp, timeout); | |
1179 | ||
1180 | if (rqstp->rq_sock) | |
1181 | printk(KERN_ERR | |
1182 | "svc_recv: service %p, socket not NULL!\n", | |
1183 | rqstp); | |
1184 | if (waitqueue_active(&rqstp->rq_wait)) | |
1185 | printk(KERN_ERR | |
1186 | "svc_recv: service %p, wait queue active!\n", | |
1187 | rqstp); | |
1188 | ||
1189 | /* Initialize the buffers */ | |
1190 | /* first reclaim pages that were moved to response list */ | |
1191 | svc_pushback_allpages(rqstp); | |
1192 | ||
1193 | /* now allocate needed pages. If we get a failure, sleep briefly */ | |
1194 | pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; | |
1195 | while (rqstp->rq_arghi < pages) { | |
1196 | struct page *p = alloc_page(GFP_KERNEL); | |
1197 | if (!p) { | |
121caf57 | 1198 | schedule_timeout_uninterruptible(msecs_to_jiffies(500)); |
1da177e4 LT |
1199 | continue; |
1200 | } | |
1201 | rqstp->rq_argpages[rqstp->rq_arghi++] = p; | |
1202 | } | |
1203 | ||
1204 | /* Make arg->head point to first page and arg->pages point to rest */ | |
1205 | arg = &rqstp->rq_arg; | |
1206 | arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]); | |
1207 | arg->head[0].iov_len = PAGE_SIZE; | |
1208 | rqstp->rq_argused = 1; | |
1209 | arg->pages = rqstp->rq_argpages + 1; | |
1210 | arg->page_base = 0; | |
1211 | /* save at least one page for response */ | |
1212 | arg->page_len = (pages-2)*PAGE_SIZE; | |
1213 | arg->len = (pages-1)*PAGE_SIZE; | |
1214 | arg->tail[0].iov_len = 0; | |
3e1d1d28 CL |
1215 | |
1216 | try_to_freeze(); | |
1887b935 | 1217 | cond_resched(); |
1da177e4 LT |
1218 | if (signalled()) |
1219 | return -EINTR; | |
1220 | ||
1221 | spin_lock_bh(&serv->sv_lock); | |
1222 | if (!list_empty(&serv->sv_tempsocks)) { | |
1223 | svsk = list_entry(serv->sv_tempsocks.next, | |
1224 | struct svc_sock, sk_list); | |
1225 | /* apparently the "standard" is that clients close | |
1226 | * idle connections after 5 minutes, servers after | |
1227 | * 6 minutes | |
1228 | * http://www.connectathon.org/talks96/nfstcp.pdf | |
1229 | */ | |
1230 | if (get_seconds() - svsk->sk_lastrecv < 6*60 | |
1231 | || test_bit(SK_BUSY, &svsk->sk_flags)) | |
1232 | svsk = NULL; | |
1233 | } | |
1234 | if (svsk) { | |
1235 | set_bit(SK_BUSY, &svsk->sk_flags); | |
1236 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
1237 | rqstp->rq_sock = svsk; | |
1238 | svsk->sk_inuse++; | |
1239 | } else if ((svsk = svc_sock_dequeue(serv)) != NULL) { | |
1240 | rqstp->rq_sock = svsk; | |
1241 | svsk->sk_inuse++; | |
1242 | rqstp->rq_reserved = serv->sv_bufsz; | |
1243 | svsk->sk_reserved += rqstp->rq_reserved; | |
1244 | } else { | |
1245 | /* No data pending. Go to sleep */ | |
1246 | svc_serv_enqueue(serv, rqstp); | |
1247 | ||
1248 | /* | |
1249 | * We have to be able to interrupt this wait | |
1250 | * to bring down the daemons ... | |
1251 | */ | |
1252 | set_current_state(TASK_INTERRUPTIBLE); | |
1253 | add_wait_queue(&rqstp->rq_wait, &wait); | |
1254 | spin_unlock_bh(&serv->sv_lock); | |
1255 | ||
1256 | schedule_timeout(timeout); | |
1257 | ||
3e1d1d28 | 1258 | try_to_freeze(); |
1da177e4 LT |
1259 | |
1260 | spin_lock_bh(&serv->sv_lock); | |
1261 | remove_wait_queue(&rqstp->rq_wait, &wait); | |
1262 | ||
1263 | if (!(svsk = rqstp->rq_sock)) { | |
1264 | svc_serv_dequeue(serv, rqstp); | |
1265 | spin_unlock_bh(&serv->sv_lock); | |
1266 | dprintk("svc: server %p, no data yet\n", rqstp); | |
1267 | return signalled()? -EINTR : -EAGAIN; | |
1268 | } | |
1269 | } | |
1270 | spin_unlock_bh(&serv->sv_lock); | |
1271 | ||
1272 | dprintk("svc: server %p, socket %p, inuse=%d\n", | |
1273 | rqstp, svsk, svsk->sk_inuse); | |
1274 | len = svsk->sk_recvfrom(rqstp); | |
1275 | dprintk("svc: got len=%d\n", len); | |
1276 | ||
1277 | /* No data, incomplete (TCP) read, or accept() */ | |
1278 | if (len == 0 || len == -EAGAIN) { | |
1279 | rqstp->rq_res.len = 0; | |
1280 | svc_sock_release(rqstp); | |
1281 | return -EAGAIN; | |
1282 | } | |
1283 | svsk->sk_lastrecv = get_seconds(); | |
1284 | if (test_bit(SK_TEMP, &svsk->sk_flags)) { | |
1285 | /* push active sockets to end of list */ | |
1286 | spin_lock_bh(&serv->sv_lock); | |
1287 | if (!list_empty(&svsk->sk_list)) | |
1288 | list_move_tail(&svsk->sk_list, &serv->sv_tempsocks); | |
1289 | spin_unlock_bh(&serv->sv_lock); | |
1290 | } | |
1291 | ||
1292 | rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024; | |
1293 | rqstp->rq_chandle.defer = svc_defer; | |
1294 | ||
1295 | if (serv->sv_stats) | |
1296 | serv->sv_stats->netcnt++; | |
1297 | return len; | |
1298 | } | |
1299 | ||
1300 | /* | |
1301 | * Drop request | |
1302 | */ | |
1303 | void | |
1304 | svc_drop(struct svc_rqst *rqstp) | |
1305 | { | |
1306 | dprintk("svc: socket %p dropped request\n", rqstp->rq_sock); | |
1307 | svc_sock_release(rqstp); | |
1308 | } | |
1309 | ||
1310 | /* | |
1311 | * Return reply to client. | |
1312 | */ | |
1313 | int | |
1314 | svc_send(struct svc_rqst *rqstp) | |
1315 | { | |
1316 | struct svc_sock *svsk; | |
1317 | int len; | |
1318 | struct xdr_buf *xb; | |
1319 | ||
1320 | if ((svsk = rqstp->rq_sock) == NULL) { | |
1321 | printk(KERN_WARNING "NULL socket pointer in %s:%d\n", | |
1322 | __FILE__, __LINE__); | |
1323 | return -EFAULT; | |
1324 | } | |
1325 | ||
1326 | /* release the receive skb before sending the reply */ | |
1327 | svc_release_skb(rqstp); | |
1328 | ||
1329 | /* calculate over-all length */ | |
1330 | xb = & rqstp->rq_res; | |
1331 | xb->len = xb->head[0].iov_len + | |
1332 | xb->page_len + | |
1333 | xb->tail[0].iov_len; | |
1334 | ||
57b47a53 IM |
1335 | /* Grab svsk->sk_mutex to serialize outgoing data. */ |
1336 | mutex_lock(&svsk->sk_mutex); | |
1da177e4 LT |
1337 | if (test_bit(SK_DEAD, &svsk->sk_flags)) |
1338 | len = -ENOTCONN; | |
1339 | else | |
1340 | len = svsk->sk_sendto(rqstp); | |
57b47a53 | 1341 | mutex_unlock(&svsk->sk_mutex); |
1da177e4 LT |
1342 | svc_sock_release(rqstp); |
1343 | ||
1344 | if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) | |
1345 | return 0; | |
1346 | return len; | |
1347 | } | |
1348 | ||
1349 | /* | |
1350 | * Initialize socket for RPC use and create svc_sock struct | |
1351 | * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. | |
1352 | */ | |
1353 | static struct svc_sock * | |
1354 | svc_setup_socket(struct svc_serv *serv, struct socket *sock, | |
1355 | int *errp, int pmap_register) | |
1356 | { | |
1357 | struct svc_sock *svsk; | |
1358 | struct sock *inet; | |
1359 | ||
1360 | dprintk("svc: svc_setup_socket %p\n", sock); | |
0da974f4 | 1361 | if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { |
1da177e4 LT |
1362 | *errp = -ENOMEM; |
1363 | return NULL; | |
1364 | } | |
1da177e4 LT |
1365 | |
1366 | inet = sock->sk; | |
1367 | ||
1368 | /* Register socket with portmapper */ | |
1369 | if (*errp >= 0 && pmap_register) | |
1370 | *errp = svc_register(serv, inet->sk_protocol, | |
1371 | ntohs(inet_sk(inet)->sport)); | |
1372 | ||
1373 | if (*errp < 0) { | |
1374 | kfree(svsk); | |
1375 | return NULL; | |
1376 | } | |
1377 | ||
1378 | set_bit(SK_BUSY, &svsk->sk_flags); | |
1379 | inet->sk_user_data = svsk; | |
1380 | svsk->sk_sock = sock; | |
1381 | svsk->sk_sk = inet; | |
1382 | svsk->sk_ostate = inet->sk_state_change; | |
1383 | svsk->sk_odata = inet->sk_data_ready; | |
1384 | svsk->sk_owspace = inet->sk_write_space; | |
1385 | svsk->sk_server = serv; | |
1386 | svsk->sk_lastrecv = get_seconds(); | |
1387 | INIT_LIST_HEAD(&svsk->sk_deferred); | |
1388 | INIT_LIST_HEAD(&svsk->sk_ready); | |
57b47a53 | 1389 | mutex_init(&svsk->sk_mutex); |
1da177e4 LT |
1390 | |
1391 | /* Initialize the socket */ | |
1392 | if (sock->type == SOCK_DGRAM) | |
1393 | svc_udp_init(svsk); | |
1394 | else | |
1395 | svc_tcp_init(svsk); | |
1396 | ||
1397 | spin_lock_bh(&serv->sv_lock); | |
1398 | if (!pmap_register) { | |
1399 | set_bit(SK_TEMP, &svsk->sk_flags); | |
1400 | list_add(&svsk->sk_list, &serv->sv_tempsocks); | |
1401 | serv->sv_tmpcnt++; | |
1402 | } else { | |
1403 | clear_bit(SK_TEMP, &svsk->sk_flags); | |
1404 | list_add(&svsk->sk_list, &serv->sv_permsocks); | |
1405 | } | |
1406 | spin_unlock_bh(&serv->sv_lock); | |
1407 | ||
1408 | dprintk("svc: svc_setup_socket created %p (inet %p)\n", | |
1409 | svsk, svsk->sk_sk); | |
1410 | ||
1411 | clear_bit(SK_BUSY, &svsk->sk_flags); | |
1412 | svc_sock_enqueue(svsk); | |
1413 | return svsk; | |
1414 | } | |
1415 | ||
b41b66d6 N |
1416 | int svc_addsock(struct svc_serv *serv, |
1417 | int fd, | |
1418 | char *name_return, | |
1419 | int *proto) | |
1420 | { | |
1421 | int err = 0; | |
1422 | struct socket *so = sockfd_lookup(fd, &err); | |
1423 | struct svc_sock *svsk = NULL; | |
1424 | ||
1425 | if (!so) | |
1426 | return err; | |
1427 | if (so->sk->sk_family != AF_INET) | |
1428 | err = -EAFNOSUPPORT; | |
1429 | else if (so->sk->sk_protocol != IPPROTO_TCP && | |
1430 | so->sk->sk_protocol != IPPROTO_UDP) | |
1431 | err = -EPROTONOSUPPORT; | |
1432 | else if (so->state > SS_UNCONNECTED) | |
1433 | err = -EISCONN; | |
1434 | else { | |
1435 | svsk = svc_setup_socket(serv, so, &err, 1); | |
1436 | if (svsk) | |
1437 | err = 0; | |
1438 | } | |
1439 | if (err) { | |
1440 | sockfd_put(so); | |
1441 | return err; | |
1442 | } | |
1443 | if (proto) *proto = so->sk->sk_protocol; | |
1444 | return one_sock_name(name_return, svsk); | |
1445 | } | |
1446 | EXPORT_SYMBOL_GPL(svc_addsock); | |
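A hedged sketch of how a caller might hand an already-bound, unconnected userspace socket to the RPC server; the fd would come from the process writing to the nfsd control file, and the helper name here is purely illustrative:

```c
/* Illustrative only: adopt a socket created and bound by userspace,
 * returning the "ipv4 ..." name string length on success or a
 * negative errno such as -EAFNOSUPPORT or -EISCONN on failure.
 */
static int example_adopt_socket(struct svc_serv *serv, int fd, char *namebuf)
{
	int proto = 0;

	return svc_addsock(serv, fd, namebuf, &proto);
}
```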
1447 | ||
1da177e4 LT |
1448 | /* |
1449 | * Create socket for RPC service. | |
1450 | */ | |
1451 | static int | |
1452 | svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) | |
1453 | { | |
1454 | struct svc_sock *svsk; | |
1455 | struct socket *sock; | |
1456 | int error; | |
1457 | int type; | |
1458 | ||
1459 | dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n", | |
1460 | serv->sv_program->pg_name, protocol, | |
1461 | NIPQUAD(sin->sin_addr.s_addr), | |
1462 | ntohs(sin->sin_port)); | |
1463 | ||
1464 | if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { | |
1465 | printk(KERN_WARNING "svc: only UDP and TCP " | |
1466 | "sockets supported\n"); | |
1467 | return -EINVAL; | |
1468 | } | |
1469 | type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; | |
1470 | ||
1471 | if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) | |
1472 | return error; | |
1473 | ||
18114746 ES |
1474 | if (type == SOCK_STREAM) |
1475 | sock->sk->sk_reuse = 1; /* allow address reuse */ | |
1476 | error = kernel_bind(sock, (struct sockaddr *) sin, | |
1477 | sizeof(*sin)); | |
1478 | if (error < 0) | |
1479 | goto bummer; | |
1da177e4 LT |
1480 | |
1481 | if (protocol == IPPROTO_TCP) { | |
e6242e92 | 1482 | if ((error = kernel_listen(sock, 64)) < 0) |
1da177e4 LT |
1483 | goto bummer; |
1484 | } | |
1485 | ||
1486 | if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL) | |
1487 | return 0; | |
1488 | ||
1489 | bummer: | |
1490 | dprintk("svc: svc_create_socket error = %d\n", -error); | |
1491 | sock_release(sock); | |
1492 | return error; | |
1493 | } | |
1494 | ||
1495 | /* | |
1496 | * Remove a dead socket | |
1497 | */ | |
1498 | void | |
1499 | svc_delete_socket(struct svc_sock *svsk) | |
1500 | { | |
1501 | struct svc_serv *serv; | |
1502 | struct sock *sk; | |
1503 | ||
1504 | dprintk("svc: svc_delete_socket(%p)\n", svsk); | |
1505 | ||
1506 | serv = svsk->sk_server; | |
1507 | sk = svsk->sk_sk; | |
1508 | ||
1509 | sk->sk_state_change = svsk->sk_ostate; | |
1510 | sk->sk_data_ready = svsk->sk_odata; | |
1511 | sk->sk_write_space = svsk->sk_owspace; | |
1512 | ||
1513 | spin_lock_bh(&serv->sv_lock); | |
1514 | ||
1515 | list_del_init(&svsk->sk_list); | |
1516 | list_del_init(&svsk->sk_ready); | |
1517 | if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) | |
1518 | if (test_bit(SK_TEMP, &svsk->sk_flags)) | |
1519 | serv->sv_tmpcnt--; | |
1520 | ||
1521 | if (!svsk->sk_inuse) { | |
1522 | spin_unlock_bh(&serv->sv_lock); | |
b41b66d6 N |
1523 | if (svsk->sk_sock->file) |
1524 | sockfd_put(svsk->sk_sock); | |
1525 | else | |
1526 | sock_release(svsk->sk_sock); | |
1da177e4 LT |
1527 | kfree(svsk); |
1528 | } else { | |
1529 | spin_unlock_bh(&serv->sv_lock); | |
1530 | dprintk(KERN_NOTICE "svc: server socket destroy delayed\n"); | |
1531 | /* svsk->sk_server = NULL; */ | |
1532 | } | |
1533 | } | |
1534 | ||
1535 | /* | |
1536 | * Make a socket for nfsd and lockd | |
1537 | */ | |
1538 | int | |
1539 | svc_makesock(struct svc_serv *serv, int protocol, unsigned short port) | |
1540 | { | |
1541 | struct sockaddr_in sin; | |
1542 | ||
1543 | dprintk("svc: creating socket proto = %d\n", protocol); | |
1544 | sin.sin_family = AF_INET; | |
1545 | sin.sin_addr.s_addr = INADDR_ANY; | |
1546 | sin.sin_port = htons(port); | |
1547 | return svc_create_socket(serv, protocol, &sin); | |
1548 | } | |
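A hedged usage sketch: lockd and nfsd bring their listeners up by calling this once per transport, roughly as below, with the well-known NFS port as an illustrative value and error handling elided:

```c
/* Illustrative only: create UDP and TCP listeners on port 2049. */
static int example_make_nfs_sockets(struct svc_serv *serv)
{
	int err = svc_makesock(serv, IPPROTO_UDP, 2049);

	if (err < 0)
		return err;
	return svc_makesock(serv, IPPROTO_TCP, 2049);
}
```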
1549 | ||
1550 | /* | |
1551 | * Handle defer and revisit of requests | |
1552 | */ | |
1553 | ||
1554 | static void svc_revisit(struct cache_deferred_req *dreq, int too_many) | |
1555 | { | |
1556 | struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); | |
1557 | struct svc_serv *serv = dreq->owner; | |
1558 | struct svc_sock *svsk; | |
1559 | ||
1560 | if (too_many) { | |
1561 | svc_sock_put(dr->svsk); | |
1562 | kfree(dr); | |
1563 | return; | |
1564 | } | |
1565 | dprintk("revisit queued\n"); | |
1566 | svsk = dr->svsk; | |
1567 | dr->svsk = NULL; | |
1568 | spin_lock_bh(&serv->sv_lock); | |
1569 | list_add(&dr->handle.recent, &svsk->sk_deferred); | |
1570 | spin_unlock_bh(&serv->sv_lock); | |
1571 | set_bit(SK_DEFERRED, &svsk->sk_flags); | |
1572 | svc_sock_enqueue(svsk); | |
1573 | svc_sock_put(svsk); | |
1574 | } | |
1575 | ||
1576 | static struct cache_deferred_req * | |
1577 | svc_defer(struct cache_req *req) | |
1578 | { | |
1579 | struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); | |
1580 | int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len); | |
1581 | struct svc_deferred_req *dr; | |
1582 | ||
1583 | if (rqstp->rq_arg.page_len) | |
1584 | return NULL; /* if more than a page, give up FIXME */ | |
1585 | if (rqstp->rq_deferred) { | |
1586 | dr = rqstp->rq_deferred; | |
1587 | rqstp->rq_deferred = NULL; | |
1588 | } else { | |
1589 | int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; | |
1590 | /* FIXME maybe discard if size too large */ | |
1591 | dr = kmalloc(size, GFP_KERNEL); | |
1592 | if (dr == NULL) | |
1593 | return NULL; | |
1594 | ||
1595 | dr->handle.owner = rqstp->rq_server; | |
1596 | dr->prot = rqstp->rq_prot; | |
1597 | dr->addr = rqstp->rq_addr; | |
1918e341 | 1598 | dr->daddr = rqstp->rq_daddr; |
1da177e4 LT |
1599 | dr->argslen = rqstp->rq_arg.len >> 2; |
1600 | memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); | |
1601 | } | |
1602 | spin_lock_bh(&rqstp->rq_server->sv_lock); | |
1603 | rqstp->rq_sock->sk_inuse++; | |
1604 | dr->svsk = rqstp->rq_sock; | |
1605 | spin_unlock_bh(&rqstp->rq_server->sv_lock); | |
1606 | ||
1607 | dr->handle.revisit = svc_revisit; | |
1608 | return &dr->handle; | |
1609 | } | |
1610 | ||
1611 | /* | |
1612 | * recv data from a deferred request into an active one | |
1613 | */ | |
1614 | static int svc_deferred_recv(struct svc_rqst *rqstp) | |
1615 | { | |
1616 | struct svc_deferred_req *dr = rqstp->rq_deferred; | |
1617 | ||
1618 | rqstp->rq_arg.head[0].iov_base = dr->args; | |
1619 | rqstp->rq_arg.head[0].iov_len = dr->argslen<<2; | |
1620 | rqstp->rq_arg.page_len = 0; | |
1621 | rqstp->rq_arg.len = dr->argslen<<2; | |
1622 | rqstp->rq_prot = dr->prot; | |
1623 | rqstp->rq_addr = dr->addr; | |
1918e341 | 1624 | rqstp->rq_daddr = dr->daddr; |
1da177e4 LT |
1625 | return dr->argslen<<2; |
1626 | } | |
1627 | ||
1628 | ||
1629 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) | |
1630 | { | |
1631 | struct svc_deferred_req *dr = NULL; | |
1632 | struct svc_serv *serv = svsk->sk_server; | |
1633 | ||
1634 | if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) | |
1635 | return NULL; | |
1636 | spin_lock_bh(&serv->sv_lock); | |
1637 | clear_bit(SK_DEFERRED, &svsk->sk_flags); | |
1638 | if (!list_empty(&svsk->sk_deferred)) { | |
1639 | dr = list_entry(svsk->sk_deferred.next, | |
1640 | struct svc_deferred_req, | |
1641 | handle.recent); | |
1642 | list_del_init(&dr->handle.recent); | |
1643 | set_bit(SK_DEFERRED, &svsk->sk_flags); | |
1644 | } | |
1645 | spin_unlock_bh(&serv->sv_lock); | |
1646 | return dr; | |
1647 | } |