Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/net/sunrpc/svcsock.c | |
3 | * | |
4 | * These are the RPC server socket internals. | |
5 | * | |
6 | * The server scheduling algorithm does not always distribute the load | |
7 | * evenly when servicing a single client. May need to modify the | |
8 | * svc_sock_enqueue procedure... | |
9 | * | |
10 | * TCP support is largely untested and may be a little slow. The problem | |
11 | * is that we currently do two separate recvfrom's, one for the 4-byte | |
12 | * record length, and the second for the actual record. This could possibly | |
13 | * be improved by always reading a minimum size of around 100 bytes and | |
14 | * tucking any superfluous bytes away in a temporary store. Still, that | |
15 | * leaves write requests out in the rain. An alternative may be to peek at | |
16 | * the first skb in the queue, and if it matches the next TCP sequence | |
17 | * number, to extract the record marker. Yuck. | |
18 | * | |
19 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | |
20 | */ | |
21 | ||
22 | #include <linux/sched.h> | |
23 | #include <linux/errno.h> | |
24 | #include <linux/fcntl.h> | |
25 | #include <linux/net.h> | |
26 | #include <linux/in.h> | |
27 | #include <linux/inet.h> | |
28 | #include <linux/udp.h> | |
91483c4b | 29 | #include <linux/tcp.h> |
1da177e4 LT |
30 | #include <linux/unistd.h> |
31 | #include <linux/slab.h> | |
32 | #include <linux/netdevice.h> | |
33 | #include <linux/skbuff.h> | |
b41b66d6 | 34 | #include <linux/file.h> |
7dfb7103 | 35 | #include <linux/freezer.h> |
1da177e4 LT |
36 | #include <net/sock.h> |
37 | #include <net/checksum.h> | |
38 | #include <net/ip.h> | |
c752f073 | 39 | #include <net/tcp_states.h> |
1da177e4 LT |
40 | #include <asm/uaccess.h> |
41 | #include <asm/ioctls.h> | |
42 | ||
43 | #include <linux/sunrpc/types.h> | |
44 | #include <linux/sunrpc/xdr.h> | |
45 | #include <linux/sunrpc/svcsock.h> | |
46 | #include <linux/sunrpc/stats.h> | |
47 | ||
48 | /* SMP locking strategy: | |
49 | * | |
3262c816 GB |
50 | * svc_pool->sp_lock protects most of the fields of that pool. |
51 | * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. | |
52 | * when both need to be taken (rare), svc_serv->sv_lock is first. | |
53 | * BKL protects svc_serv->sv_nrthread. | |
1a68d952 | 54 | * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list |
c081a0c7 | 55 | * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply. |
1da177e4 LT |
56 | * |
57 | * Some flags can be set to certain values at any time | |
58 | * providing that certain rules are followed: | |
59 | * | |
1da177e4 LT |
60 | * SK_CONN, SK_DATA, can be set or cleared at any time. |
61 | * after a set, svc_sock_enqueue must be called. | |
62 | * after a clear, the socket must be read/accepted | |
63 | * if this succeeds, it must be set again. | |
64 | * SK_CLOSE can be set at any time. It is never cleared. | |
65 | * | |
66 | */ | |
67 | ||
68 | #define RPCDBG_FACILITY RPCDBG_SVCSOCK | |
69 | ||
70 | ||
71 | static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *, | |
72 | int *errp, int pmap_reg); | |
73 | static void svc_udp_data_ready(struct sock *, int); | |
74 | static int svc_udp_recvfrom(struct svc_rqst *); | |
75 | static int svc_udp_sendto(struct svc_rqst *); | |
76 | ||
77 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk); | |
78 | static int svc_deferred_recv(struct svc_rqst *rqstp); | |
79 | static struct cache_deferred_req *svc_defer(struct cache_req *req); | |
80 | ||
36bdfc8b GB |
81 | /* apparently the "standard" is that clients close |
82 | * idle connections after 5 minutes, servers after | |
83 | * 6 minutes | |
84 | * http://www.connectathon.org/talks96/nfstcp.pdf | |
85 | */ | |
86 | static int svc_conn_age_period = 6*60; | |
87 | ||
1da177e4 | 88 | /* |
3262c816 | 89 | * Queue up an idle server thread. Must have pool->sp_lock held. |
1da177e4 | 90 | * Note: this is really a stack rather than a queue, so that we only |
3262c816 | 91 | * use as many different threads as we need, and the rest don't pollute |
1da177e4 LT |
92 | * the cache. |
93 | */ | |
94 | static inline void | |
3262c816 | 95 | svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) |
1da177e4 | 96 | { |
3262c816 | 97 | list_add(&rqstp->rq_list, &pool->sp_threads); |
1da177e4 LT |
98 | } |
99 | ||
100 | /* | |
3262c816 | 101 | * Dequeue an nfsd thread. Must have pool->sp_lock held. |
1da177e4 LT |
102 | */ |
103 | static inline void | |
3262c816 | 104 | svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) |
1da177e4 LT |
105 | { |
106 | list_del(&rqstp->rq_list); | |
107 | } | |
108 | ||
109 | /* | |
110 | * Release an skbuff after use | |
111 | */ | |
112 | static inline void | |
113 | svc_release_skb(struct svc_rqst *rqstp) | |
114 | { | |
115 | struct sk_buff *skb = rqstp->rq_skbuff; | |
116 | struct svc_deferred_req *dr = rqstp->rq_deferred; | |
117 | ||
118 | if (skb) { | |
119 | rqstp->rq_skbuff = NULL; | |
120 | ||
121 | dprintk("svc: service %p, releasing skb %p\n", rqstp, skb); | |
122 | skb_free_datagram(rqstp->rq_sock->sk_sk, skb); | |
123 | } | |
124 | if (dr) { | |
125 | rqstp->rq_deferred = NULL; | |
126 | kfree(dr); | |
127 | } | |
128 | } | |
129 | ||
130 | /* | |
131 | * Any space to write? | |
132 | */ | |
133 | static inline unsigned long | |
134 | svc_sock_wspace(struct svc_sock *svsk) | |
135 | { | |
136 | int wspace; | |
137 | ||
138 | if (svsk->sk_sock->type == SOCK_STREAM) | |
139 | wspace = sk_stream_wspace(svsk->sk_sk); | |
140 | else | |
141 | wspace = sock_wspace(svsk->sk_sk); | |
142 | ||
143 | return wspace; | |
144 | } | |
145 | ||
146 | /* | |
147 | * Queue up a socket with data pending. If there are idle nfsd | |
148 | * processes, wake 'em up. | |
149 | * | |
150 | */ | |
151 | static void | |
152 | svc_sock_enqueue(struct svc_sock *svsk) | |
153 | { | |
154 | struct svc_serv *serv = svsk->sk_server; | |
bfd24160 | 155 | struct svc_pool *pool; |
1da177e4 | 156 | struct svc_rqst *rqstp; |
bfd24160 | 157 | int cpu; |
1da177e4 LT |
158 | |
159 | if (!(svsk->sk_flags & | |
160 | ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) )) | |
161 | return; | |
162 | if (test_bit(SK_DEAD, &svsk->sk_flags)) | |
163 | return; | |
164 | ||
bfd24160 GB |
165 | cpu = get_cpu(); |
166 | pool = svc_pool_for_cpu(svsk->sk_server, cpu); | |
167 | put_cpu(); | |
168 | ||
3262c816 | 169 | spin_lock_bh(&pool->sp_lock); |
1da177e4 | 170 | |
3262c816 GB |
171 | if (!list_empty(&pool->sp_threads) && |
172 | !list_empty(&pool->sp_sockets)) | |
1da177e4 LT |
173 | printk(KERN_ERR |
174 | "svc_sock_enqueue: threads and sockets both waiting??\n"); | |
175 | ||
176 | if (test_bit(SK_DEAD, &svsk->sk_flags)) { | |
177 | /* Don't enqueue dead sockets */ | |
178 | dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk); | |
179 | goto out_unlock; | |
180 | } | |
181 | ||
c081a0c7 GB |
182 | /* Mark socket as busy. It will remain in this state until the |
183 | * server has processed all pending data and put the socket back | |
184 | * on the idle list. We update SK_BUSY atomically because | |
185 | * it also guards against trying to enqueue the svc_sock twice. | |
186 | */ | |
187 | if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) { | |
188 | /* Don't enqueue socket while already enqueued */ | |
1da177e4 LT |
189 | dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk); |
190 | goto out_unlock; | |
191 | } | |
3262c816 GB |
192 | BUG_ON(svsk->sk_pool != NULL); |
193 | svsk->sk_pool = pool; | |
1da177e4 LT |
194 | |
195 | set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | |
c6b0a9f8 | 196 | if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2 |
1da177e4 LT |
197 | > svc_sock_wspace(svsk)) |
198 | && !test_bit(SK_CLOSE, &svsk->sk_flags) | |
199 | && !test_bit(SK_CONN, &svsk->sk_flags)) { | |
200 | /* Don't enqueue while not enough space for reply */ | |
201 | dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n", | |
c6b0a9f8 | 202 | svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg, |
1da177e4 | 203 | svc_sock_wspace(svsk)); |
3262c816 | 204 | svsk->sk_pool = NULL; |
c081a0c7 | 205 | clear_bit(SK_BUSY, &svsk->sk_flags); |
1da177e4 LT |
206 | goto out_unlock; |
207 | } | |
208 | clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | |
209 | ||
1da177e4 | 210 | |
3262c816 GB |
211 | if (!list_empty(&pool->sp_threads)) { |
212 | rqstp = list_entry(pool->sp_threads.next, | |
1da177e4 LT |
213 | struct svc_rqst, |
214 | rq_list); | |
215 | dprintk("svc: socket %p served by daemon %p\n", | |
216 | svsk->sk_sk, rqstp); | |
3262c816 | 217 | svc_thread_dequeue(pool, rqstp); |
1da177e4 LT |
218 | if (rqstp->rq_sock) |
219 | printk(KERN_ERR | |
220 | "svc_sock_enqueue: server %p, rq_sock=%p!\n", | |
221 | rqstp, rqstp->rq_sock); | |
222 | rqstp->rq_sock = svsk; | |
c45c357d | 223 | atomic_inc(&svsk->sk_inuse); |
c6b0a9f8 | 224 | rqstp->rq_reserved = serv->sv_max_mesg; |
5685f0fa | 225 | atomic_add(rqstp->rq_reserved, &svsk->sk_reserved); |
3262c816 | 226 | BUG_ON(svsk->sk_pool != pool); |
1da177e4 LT |
227 | wake_up(&rqstp->rq_wait); |
228 | } else { | |
229 | dprintk("svc: socket %p put into queue\n", svsk->sk_sk); | |
3262c816 GB |
230 | list_add_tail(&svsk->sk_ready, &pool->sp_sockets); |
231 | BUG_ON(svsk->sk_pool != pool); | |
1da177e4 LT |
232 | } |
233 | ||
234 | out_unlock: | |
3262c816 | 235 | spin_unlock_bh(&pool->sp_lock); |
1da177e4 LT |
236 | } |
237 | ||
238 | /* | |
3262c816 | 239 | * Dequeue the first socket. Must be called with the pool->sp_lock held. |
1da177e4 LT |
240 | */ |
241 | static inline struct svc_sock * | |
3262c816 | 242 | svc_sock_dequeue(struct svc_pool *pool) |
1da177e4 LT |
243 | { |
244 | struct svc_sock *svsk; | |
245 | ||
3262c816 | 246 | if (list_empty(&pool->sp_sockets)) |
1da177e4 LT |
247 | return NULL; |
248 | ||
3262c816 | 249 | svsk = list_entry(pool->sp_sockets.next, |
1da177e4 LT |
250 | struct svc_sock, sk_ready); |
251 | list_del_init(&svsk->sk_ready); | |
252 | ||
253 | dprintk("svc: socket %p dequeued, inuse=%d\n", | |
c45c357d | 254 | svsk->sk_sk, atomic_read(&svsk->sk_inuse)); |
1da177e4 LT |
255 | |
256 | return svsk; | |
257 | } | |
258 | ||
259 | /* | |
260 | * Having read something from a socket, check whether it | |
261 | * needs to be re-enqueued. | |
262 | * Note: SK_DATA only gets cleared when a read-attempt finds | |
263 | * no (or insufficient) data. | |
264 | */ | |
265 | static inline void | |
266 | svc_sock_received(struct svc_sock *svsk) | |
267 | { | |
3262c816 | 268 | svsk->sk_pool = NULL; |
1da177e4 LT |
269 | clear_bit(SK_BUSY, &svsk->sk_flags); |
270 | svc_sock_enqueue(svsk); | |
271 | } | |
272 | ||
273 | ||
274 | /** | |
275 | * svc_reserve - change the space reserved for the reply to a request. | |
276 | * @rqstp: The request in question | |
277 | * @space: new max space to reserve | |
278 | * | |
279 | * Each request reserves some space on the output queue of the socket | |
280 | * to make sure the reply fits. This function reduces that reserved | |
281 | * space to be the amount of space used already, plus @space. | |
282 | * | |
283 | */ | |
284 | void svc_reserve(struct svc_rqst *rqstp, int space) | |
285 | { | |
286 | space += rqstp->rq_res.head[0].iov_len; | |
287 | ||
288 | if (space < rqstp->rq_reserved) { | |
289 | struct svc_sock *svsk = rqstp->rq_sock; | |
5685f0fa | 290 | atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved); |
1da177e4 | 291 | rqstp->rq_reserved = space; |
1da177e4 LT |
292 | |
293 | svc_sock_enqueue(svsk); | |
294 | } | |
295 | } | |
296 | ||
297 | /* | |
298 | * Release a socket after use. | |
299 | */ | |
300 | static inline void | |
301 | svc_sock_put(struct svc_sock *svsk) | |
302 | { | |
202dd450 AM |
303 | if (atomic_dec_and_test(&svsk->sk_inuse) && |
304 | test_bit(SK_DEAD, &svsk->sk_flags)) { | |
305 | dprintk("svc: releasing dead socket\n"); | |
d6740df9 NB |
306 | if (svsk->sk_sock->file) |
307 | sockfd_put(svsk->sk_sock); | |
308 | else | |
309 | sock_release(svsk->sk_sock); | |
310 | if (svsk->sk_info_authunix != NULL) | |
311 | svcauth_unix_info_release(svsk->sk_info_authunix); | |
1da177e4 LT |
312 | kfree(svsk); |
313 | } | |
1da177e4 LT |
314 | } |
315 | ||
316 | static void | |
317 | svc_sock_release(struct svc_rqst *rqstp) | |
318 | { | |
319 | struct svc_sock *svsk = rqstp->rq_sock; | |
320 | ||
321 | svc_release_skb(rqstp); | |
322 | ||
44524359 | 323 | svc_free_res_pages(rqstp); |
1da177e4 LT |
324 | rqstp->rq_res.page_len = 0; |
325 | rqstp->rq_res.page_base = 0; | |
326 | ||
327 | ||
328 | /* Reset response buffer and release | |
329 | * the reservation. | |
330 | * But first, check that enough space was reserved | |
331 | * for the reply, otherwise we have a bug! | |
332 | */ | |
333 | if ((rqstp->rq_res.len) > rqstp->rq_reserved) | |
334 | printk(KERN_ERR "RPC request reserved %d but used %d\n", | |
335 | rqstp->rq_reserved, | |
336 | rqstp->rq_res.len); | |
337 | ||
338 | rqstp->rq_res.head[0].iov_len = 0; | |
339 | svc_reserve(rqstp, 0); | |
340 | rqstp->rq_sock = NULL; | |
341 | ||
342 | svc_sock_put(svsk); | |
343 | } | |
344 | ||
345 | /* | |
346 | * External function to wake up a server waiting for data | |
3262c816 GB |
347 | * This really only makes sense for services like lockd |
348 | * which have exactly one thread anyway. | |
1da177e4 LT |
349 | */ |
350 | void | |
351 | svc_wake_up(struct svc_serv *serv) | |
352 | { | |
353 | struct svc_rqst *rqstp; | |
3262c816 GB |
354 | unsigned int i; |
355 | struct svc_pool *pool; | |
356 | ||
357 | for (i = 0; i < serv->sv_nrpools; i++) { | |
358 | pool = &serv->sv_pools[i]; | |
359 | ||
360 | spin_lock_bh(&pool->sp_lock); | |
361 | if (!list_empty(&pool->sp_threads)) { | |
362 | rqstp = list_entry(pool->sp_threads.next, | |
363 | struct svc_rqst, | |
364 | rq_list); | |
365 | dprintk("svc: daemon %p woken up.\n", rqstp); | |
366 | /* | |
367 | svc_thread_dequeue(pool, rqstp); | |
368 | rqstp->rq_sock = NULL; | |
369 | */ | |
370 | wake_up(&rqstp->rq_wait); | |
371 | } | |
372 | spin_unlock_bh(&pool->sp_lock); | |
1da177e4 | 373 | } |
1da177e4 LT |
374 | } |
375 | ||
376 | /* | |
377 | * Generic sendto routine | |
378 | */ | |
379 | static int | |
380 | svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) | |
381 | { | |
382 | struct svc_sock *svsk = rqstp->rq_sock; | |
383 | struct socket *sock = svsk->sk_sock; | |
384 | int slen; | |
385 | char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))]; | |
386 | struct cmsghdr *cmh = (struct cmsghdr *)buffer; | |
387 | struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh); | |
388 | int len = 0; | |
389 | int result; | |
390 | int size; | |
391 | struct page **ppage = xdr->pages; | |
392 | size_t base = xdr->page_base; | |
393 | unsigned int pglen = xdr->page_len; | |
394 | unsigned int flags = MSG_MORE; | |
395 | ||
396 | slen = xdr->len; | |
397 | ||
398 | if (rqstp->rq_prot == IPPROTO_UDP) { | |
399 | /* set the source and destination */ | |
400 | struct msghdr msg; | |
401 | msg.msg_name = &rqstp->rq_addr; | |
402 | msg.msg_namelen = sizeof(rqstp->rq_addr); | |
403 | msg.msg_iov = NULL; | |
404 | msg.msg_iovlen = 0; | |
405 | msg.msg_flags = MSG_MORE; | |
406 | ||
407 | msg.msg_control = cmh; | |
408 | msg.msg_controllen = sizeof(buffer); | |
409 | cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); | |
410 | cmh->cmsg_level = SOL_IP; | |
411 | cmh->cmsg_type = IP_PKTINFO; | |
412 | pki->ipi_ifindex = 0; | |
413 | pki->ipi_spec_dst.s_addr = rqstp->rq_daddr; | |
414 | ||
415 | if (sock_sendmsg(sock, &msg, 0) < 0) | |
416 | goto out; | |
417 | } | |
418 | ||
419 | /* send head */ | |
420 | if (slen == xdr->head[0].iov_len) | |
421 | flags = 0; | |
44524359 N |
422 | len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, |
423 | xdr->head[0].iov_len, flags); | |
1da177e4 LT |
424 | if (len != xdr->head[0].iov_len) |
425 | goto out; | |
426 | slen -= xdr->head[0].iov_len; | |
427 | if (slen == 0) | |
428 | goto out; | |
429 | ||
430 | /* send page data */ | |
431 | size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen; | |
432 | while (pglen > 0) { | |
433 | if (slen == size) | |
434 | flags = 0; | |
e6242e92 | 435 | result = kernel_sendpage(sock, *ppage, base, size, flags); |
1da177e4 LT |
436 | if (result > 0) |
437 | len += result; | |
438 | if (result != size) | |
439 | goto out; | |
440 | slen -= size; | |
441 | pglen -= size; | |
442 | size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen; | |
443 | base = 0; | |
444 | ppage++; | |
445 | } | |
446 | /* send tail */ | |
447 | if (xdr->tail[0].iov_len) { | |
44524359 N |
448 | result = kernel_sendpage(sock, rqstp->rq_respages[0], |
449 | ((unsigned long)xdr->tail[0].iov_base) | |
450 | & (PAGE_SIZE-1), | |
1da177e4 LT |
451 | xdr->tail[0].iov_len, 0); |
452 | ||
453 | if (result > 0) | |
454 | len += result; | |
455 | } | |
456 | out: | |
457 | dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n", | |
458 | rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len, | |
459 | rqstp->rq_addr.sin_addr.s_addr); | |
460 | ||
461 | return len; | |
462 | } | |
463 | ||
80212d59 N |
464 | /* |
465 | * Report socket names for nfsdfs | |
466 | */ | |
467 | static int one_sock_name(char *buf, struct svc_sock *svsk) | |
468 | { | |
469 | int len; | |
470 | ||
471 | switch(svsk->sk_sk->sk_family) { | |
472 | case AF_INET: | |
473 | len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n", | |
474 | svsk->sk_sk->sk_protocol==IPPROTO_UDP? | |
475 | "udp" : "tcp", | |
476 | NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr), | |
477 | inet_sk(svsk->sk_sk)->num); | |
478 | break; | |
479 | default: | |
480 | len = sprintf(buf, "*unknown-%d*\n", | |
481 | svsk->sk_sk->sk_family); | |
482 | } | |
483 | return len; | |
484 | } | |
485 | ||
486 | int | |
b41b66d6 | 487 | svc_sock_names(char *buf, struct svc_serv *serv, char *toclose) |
80212d59 | 488 | { |
b41b66d6 | 489 | struct svc_sock *svsk, *closesk = NULL; |
80212d59 N |
490 | int len = 0; |
491 | ||
492 | if (!serv) | |
493 | return 0; | |
494 | spin_lock(&serv->sv_lock); | |
495 | list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) { | |
496 | int onelen = one_sock_name(buf+len, svsk); | |
b41b66d6 N |
497 | if (toclose && strcmp(toclose, buf+len) == 0) |
498 | closesk = svsk; | |
499 | else | |
500 | len += onelen; | |
80212d59 N |
501 | } |
502 | spin_unlock(&serv->sv_lock); | |
b41b66d6 | 503 | if (closesk) |
5680c446 N |
504 | /* Should unregister with portmap, but you cannot |
505 | * unregister just one protocol... | |
506 | */ | |
b41b66d6 | 507 | svc_delete_socket(closesk); |
37a03472 N |
508 | else if (toclose) |
509 | return -ENOENT; | |
80212d59 N |
510 | return len; |
511 | } | |
512 | EXPORT_SYMBOL(svc_sock_names); | |
513 | ||
1da177e4 LT |
514 | /* |
515 | * Check input queue length | |
516 | */ | |
517 | static int | |
518 | svc_recv_available(struct svc_sock *svsk) | |
519 | { | |
1da177e4 LT |
520 | struct socket *sock = svsk->sk_sock; |
521 | int avail, err; | |
522 | ||
e6242e92 | 523 | err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail); |
1da177e4 LT |
524 | |
525 | return (err >= 0)? avail : err; | |
526 | } | |
527 | ||
528 | /* | |
529 | * Generic recvfrom routine. | |
530 | */ | |
531 | static int | |
532 | svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) | |
533 | { | |
534 | struct msghdr msg; | |
535 | struct socket *sock; | |
536 | int len, alen; | |
537 | ||
538 | rqstp->rq_addrlen = sizeof(rqstp->rq_addr); | |
539 | sock = rqstp->rq_sock->sk_sock; | |
540 | ||
541 | msg.msg_name = &rqstp->rq_addr; | |
542 | msg.msg_namelen = sizeof(rqstp->rq_addr); | |
543 | msg.msg_control = NULL; | |
544 | msg.msg_controllen = 0; | |
545 | ||
546 | msg.msg_flags = MSG_DONTWAIT; | |
547 | ||
548 | len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT); | |
549 | ||
550 | /* sock_recvmsg doesn't fill in the name/namelen, so we must.. | |
551 | * possibly we should cache this in the svc_sock structure | |
552 | * at accept time. FIXME | |
553 | */ | |
554 | alen = sizeof(rqstp->rq_addr); | |
e6242e92 | 555 | kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen); |
1da177e4 LT |
556 | |
557 | dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", | |
558 | rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len); | |
559 | ||
560 | return len; | |
561 | } | |
562 | ||
563 | /* | |
564 | * Set socket snd and rcv buffer lengths | |
565 | */ | |
566 | static inline void | |
567 | svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) | |
568 | { | |
569 | #if 0 | |
570 | mm_segment_t oldfs; | |
571 | oldfs = get_fs(); set_fs(KERNEL_DS); | |
572 | sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, | |
573 | (char*)&snd, sizeof(snd)); | |
574 | sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, | |
575 | (char*)&rcv, sizeof(rcv)); | |
576 | #else | |
577 | /* sock_setsockopt limits use to sysctl_?mem_max, | |
578 | * which isn't acceptable. Until that is made conditional | |
579 | * on not having CAP_SYS_RESOURCE or similar, we go direct... | |
580 | * DaveM said I could! | |
581 | */ | |
582 | lock_sock(sock->sk); | |
583 | sock->sk->sk_sndbuf = snd * 2; | |
584 | sock->sk->sk_rcvbuf = rcv * 2; | |
585 | sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK; | |
586 | release_sock(sock->sk); | |
587 | #endif | |
588 | } | |
589 | /* | |
590 | * INET callback when data has been received on the socket. | |
591 | */ | |
592 | static void | |
593 | svc_udp_data_ready(struct sock *sk, int count) | |
594 | { | |
939bb7ef | 595 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 | 596 | |
939bb7ef NB |
597 | if (svsk) { |
598 | dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n", | |
599 | svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags)); | |
600 | set_bit(SK_DATA, &svsk->sk_flags); | |
601 | svc_sock_enqueue(svsk); | |
602 | } | |
1da177e4 LT |
603 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
604 | wake_up_interruptible(sk->sk_sleep); | |
605 | } | |
606 | ||
607 | /* | |
608 | * INET callback when space is newly available on the socket. | |
609 | */ | |
610 | static void | |
611 | svc_write_space(struct sock *sk) | |
612 | { | |
613 | struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); | |
614 | ||
615 | if (svsk) { | |
616 | dprintk("svc: socket %p(inet %p), write_space busy=%d\n", | |
617 | svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags)); | |
618 | svc_sock_enqueue(svsk); | |
619 | } | |
620 | ||
621 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { | |
939bb7ef | 622 | dprintk("RPC svc_write_space: someone sleeping on %p\n", |
1da177e4 LT |
623 | svsk); |
624 | wake_up_interruptible(sk->sk_sleep); | |
625 | } | |
626 | } | |
627 | ||
628 | /* | |
629 | * Receive a datagram from a UDP socket. | |
630 | */ | |
1da177e4 LT |
631 | static int |
632 | svc_udp_recvfrom(struct svc_rqst *rqstp) | |
633 | { | |
634 | struct svc_sock *svsk = rqstp->rq_sock; | |
635 | struct svc_serv *serv = svsk->sk_server; | |
636 | struct sk_buff *skb; | |
637 | int err, len; | |
638 | ||
639 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) | |
640 | /* udp sockets need large rcvbuf as all pending | |
641 | * requests are still in that buffer. sndbuf must | |
642 | * also be large enough that there is enough space | |
3262c816 GB |
643 | * for one reply per thread. We count all threads |
644 | * rather than threads in a particular pool, which | |
645 | * provides an upper bound on the number of threads | |
646 | * which will access the socket. | |
1da177e4 LT |
647 | */ |
648 | svc_sock_setbufsize(svsk->sk_sock, | |
c6b0a9f8 N |
649 | (serv->sv_nrthreads+3) * serv->sv_max_mesg, |
650 | (serv->sv_nrthreads+3) * serv->sv_max_mesg); | |
1da177e4 LT |
651 | |
652 | if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) { | |
653 | svc_sock_received(svsk); | |
654 | return svc_deferred_recv(rqstp); | |
655 | } | |
656 | ||
657 | clear_bit(SK_DATA, &svsk->sk_flags); | |
658 | while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { | |
659 | if (err == -EAGAIN) { | |
660 | svc_sock_received(svsk); | |
661 | return err; | |
662 | } | |
663 | /* possibly an icmp error */ | |
664 | dprintk("svc: recvfrom returned error %d\n", -err); | |
665 | } | |
a61bbcf2 PM |
666 | if (skb->tstamp.off_sec == 0) { |
667 | struct timeval tv; | |
668 | ||
669 | tv.tv_sec = xtime.tv_sec; | |
4bcde03d | 670 | tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; |
a61bbcf2 | 671 | skb_set_timestamp(skb, &tv); |
1da177e4 LT |
672 | /* Don't enable netstamp, sunrpc doesn't |
673 | need that much accuracy */ | |
674 | } | |
a61bbcf2 | 675 | skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp); |
1da177e4 LT |
676 | set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */ |
677 | ||
678 | /* | |
679 | * Maybe more packets - kick another thread ASAP. | |
680 | */ | |
681 | svc_sock_received(svsk); | |
682 | ||
683 | len = skb->len - sizeof(struct udphdr); | |
684 | rqstp->rq_arg.len = len; | |
685 | ||
686 | rqstp->rq_prot = IPPROTO_UDP; | |
687 | ||
688 | /* Get sender address */ | |
689 | rqstp->rq_addr.sin_family = AF_INET; | |
690 | rqstp->rq_addr.sin_port = skb->h.uh->source; | |
691 | rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr; | |
692 | rqstp->rq_daddr = skb->nh.iph->daddr; | |
693 | ||
694 | if (skb_is_nonlinear(skb)) { | |
695 | /* we have to copy */ | |
696 | local_bh_disable(); | |
697 | if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) { | |
698 | local_bh_enable(); | |
699 | /* checksum error */ | |
700 | skb_free_datagram(svsk->sk_sk, skb); | |
701 | return 0; | |
702 | } | |
703 | local_bh_enable(); | |
704 | skb_free_datagram(svsk->sk_sk, skb); | |
705 | } else { | |
706 | /* we can use it in-place */ | |
707 | rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); | |
708 | rqstp->rq_arg.head[0].iov_len = len; | |
fb286bb2 HX |
709 | if (skb_checksum_complete(skb)) { |
710 | skb_free_datagram(svsk->sk_sk, skb); | |
711 | return 0; | |
1da177e4 LT |
712 | } |
713 | rqstp->rq_skbuff = skb; | |
714 | } | |
715 | ||
716 | rqstp->rq_arg.page_base = 0; | |
717 | if (len <= rqstp->rq_arg.head[0].iov_len) { | |
718 | rqstp->rq_arg.head[0].iov_len = len; | |
719 | rqstp->rq_arg.page_len = 0; | |
44524359 | 720 | rqstp->rq_respages = rqstp->rq_pages+1; |
1da177e4 LT |
721 | } else { |
722 | rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; | |
44524359 N |
723 | rqstp->rq_respages = rqstp->rq_pages + 1 + |
724 | (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE; | |
1da177e4 LT |
725 | } |
726 | ||
727 | if (serv->sv_stats) | |
728 | serv->sv_stats->netudpcnt++; | |
729 | ||
730 | return len; | |
731 | } | |
732 | ||
733 | static int | |
734 | svc_udp_sendto(struct svc_rqst *rqstp) | |
735 | { | |
736 | int error; | |
737 | ||
738 | error = svc_sendto(rqstp, &rqstp->rq_res); | |
739 | if (error == -ECONNREFUSED) | |
740 | /* ICMP error on earlier request. */ | |
741 | error = svc_sendto(rqstp, &rqstp->rq_res); | |
742 | ||
743 | return error; | |
744 | } | |
745 | ||
746 | static void | |
747 | svc_udp_init(struct svc_sock *svsk) | |
748 | { | |
749 | svsk->sk_sk->sk_data_ready = svc_udp_data_ready; | |
750 | svsk->sk_sk->sk_write_space = svc_write_space; | |
751 | svsk->sk_recvfrom = svc_udp_recvfrom; | |
752 | svsk->sk_sendto = svc_udp_sendto; | |
753 | ||
754 | /* initial setting must have enough space to |
755 | * receive and respond to one request. | |
756 | * svc_udp_recvfrom will re-adjust if necessary | |
757 | */ | |
758 | svc_sock_setbufsize(svsk->sk_sock, | |
c6b0a9f8 N |
759 | 3 * svsk->sk_server->sv_max_mesg, |
760 | 3 * svsk->sk_server->sv_max_mesg); | |
1da177e4 LT |
761 | |
762 | set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ | |
763 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
764 | } | |
765 | ||
766 | /* | |
767 | * A data_ready event on a listening socket means there's a connection | |
768 | * pending. Do not use state_change as a substitute for it. | |
769 | */ | |
770 | static void | |
771 | svc_tcp_listen_data_ready(struct sock *sk, int count_unused) | |
772 | { | |
939bb7ef | 773 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 LT |
774 | |
775 | dprintk("svc: socket %p TCP (listen) state change %d\n", | |
939bb7ef | 776 | sk, sk->sk_state); |
1da177e4 | 777 | |
939bb7ef NB |
778 | /* |
779 | * This callback may be called twice when a new connection |
780 | * is established as a child socket inherits everything | |
781 | * from a parent LISTEN socket. | |
782 | * 1) data_ready method of the parent socket will be called | |
783 | * when one of the child sockets becomes ESTABLISHED. |
784 | * 2) data_ready method of the child socket may be called | |
785 | * when it receives data before the socket is accepted. | |
786 | * In case of 2, we should ignore it silently. | |
787 | */ | |
788 | if (sk->sk_state == TCP_LISTEN) { | |
789 | if (svsk) { | |
790 | set_bit(SK_CONN, &svsk->sk_flags); | |
791 | svc_sock_enqueue(svsk); | |
792 | } else | |
793 | printk("svc: socket %p: no user data\n", sk); | |
1da177e4 | 794 | } |
939bb7ef | 795 | |
1da177e4 LT |
796 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
797 | wake_up_interruptible_all(sk->sk_sleep); | |
798 | } | |
799 | ||
800 | /* | |
801 | * A state change on a connected socket means it's dying or dead. | |
802 | */ | |
803 | static void | |
804 | svc_tcp_state_change(struct sock *sk) | |
805 | { | |
939bb7ef | 806 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 LT |
807 | |
808 | dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n", | |
939bb7ef | 809 | sk, sk->sk_state, sk->sk_user_data); |
1da177e4 | 810 | |
939bb7ef | 811 | if (!svsk) |
1da177e4 | 812 | printk("svc: socket %p: no user data\n", sk); |
939bb7ef NB |
813 | else { |
814 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
815 | svc_sock_enqueue(svsk); | |
1da177e4 | 816 | } |
1da177e4 LT |
817 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
818 | wake_up_interruptible_all(sk->sk_sleep); | |
819 | } | |
820 | ||
821 | static void | |
822 | svc_tcp_data_ready(struct sock *sk, int count) | |
823 | { | |
939bb7ef | 824 | struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; |
1da177e4 LT |
825 | |
826 | dprintk("svc: socket %p TCP data ready (svsk %p)\n", | |
939bb7ef NB |
827 | sk, sk->sk_user_data); |
828 | if (svsk) { | |
829 | set_bit(SK_DATA, &svsk->sk_flags); | |
830 | svc_sock_enqueue(svsk); | |
831 | } | |
1da177e4 LT |
832 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) |
833 | wake_up_interruptible(sk->sk_sleep); | |
834 | } | |
835 | ||
836 | /* | |
837 | * Accept a TCP connection | |
838 | */ | |
839 | static void | |
840 | svc_tcp_accept(struct svc_sock *svsk) | |
841 | { | |
842 | struct sockaddr_in sin; | |
843 | struct svc_serv *serv = svsk->sk_server; | |
844 | struct socket *sock = svsk->sk_sock; | |
845 | struct socket *newsock; | |
1da177e4 LT |
846 | struct svc_sock *newsvsk; |
847 | int err, slen; | |
848 | ||
849 | dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); | |
850 | if (!sock) | |
851 | return; | |
852 | ||
e6242e92 SS |
853 | clear_bit(SK_CONN, &svsk->sk_flags); |
854 | err = kernel_accept(sock, &newsock, O_NONBLOCK); | |
855 | if (err < 0) { | |
1da177e4 LT |
856 | if (err == -ENOMEM) |
857 | printk(KERN_WARNING "%s: no more sockets!\n", | |
858 | serv->sv_name); | |
e6242e92 | 859 | else if (err != -EAGAIN && net_ratelimit()) |
1da177e4 LT |
860 | printk(KERN_WARNING "%s: accept failed (err %d)!\n", |
861 | serv->sv_name, -err); | |
e6242e92 | 862 | return; |
1da177e4 | 863 | } |
e6242e92 | 864 | |
1da177e4 LT |
865 | set_bit(SK_CONN, &svsk->sk_flags); |
866 | svc_sock_enqueue(svsk); | |
867 | ||
868 | slen = sizeof(sin); | |
e6242e92 | 869 | err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen); |
1da177e4 LT |
870 | if (err < 0) { |
871 | if (net_ratelimit()) | |
872 | printk(KERN_WARNING "%s: peername failed (err %d)!\n", | |
873 | serv->sv_name, -err); | |
874 | goto failed; /* aborted connection or whatever */ | |
875 | } | |
876 | ||
877 | /* Ideally, we would want to reject connections from unauthorized | |
878 | * hosts here, but when we get encryption, the IP of the host won't |
879 | * tell us anything. For now just warn about unpriv connections. | |
880 | */ | |
881 | if (ntohs(sin.sin_port) >= 1024) { | |
882 | dprintk(KERN_WARNING | |
883 | "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n", | |
884 | serv->sv_name, | |
885 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | |
886 | } | |
887 | ||
888 | dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name, | |
889 | NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); | |
890 | ||
891 | /* make sure that a write doesn't block forever when | |
892 | * low on memory | |
893 | */ | |
894 | newsock->sk->sk_sndtimeo = HZ*30; | |
895 | ||
896 | if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0))) | |
897 | goto failed; | |
898 | ||
899 | ||
900 | /* make sure that we don't have too many active connections. | |
901 | * If we have, something must be dropped. | |
902 | * | |
903 | * There's no point in trying to do random drop here for | |
904 | * DoS prevention. The NFS clients do 1 reconnect in 15 |
905 | * seconds. An attacker can easily beat that. | |
906 | * | |
907 | * The only somewhat efficient mechanism would be to drop |
908 | * old connections from the same IP first. But right now | |
909 | * we don't even record the client IP in svc_sock. | |
910 | */ | |
911 | if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) { | |
912 | struct svc_sock *svsk = NULL; | |
913 | spin_lock_bh(&serv->sv_lock); | |
914 | if (!list_empty(&serv->sv_tempsocks)) { | |
915 | if (net_ratelimit()) { | |
916 | /* Try to help the admin */ | |
917 | printk(KERN_NOTICE "%s: too many open TCP " | |
918 | "sockets, consider increasing the " | |
919 | "number of nfsd threads\n", | |
920 | serv->sv_name); | |
921 | printk(KERN_NOTICE "%s: last TCP connect from " | |
922 | "%u.%u.%u.%u:%d\n", | |
923 | serv->sv_name, | |
924 | NIPQUAD(sin.sin_addr.s_addr), | |
925 | ntohs(sin.sin_port)); | |
926 | } | |
927 | /* | |
928 | * Always select the oldest socket. It's not fair, | |
929 | * but so is life | |
930 | */ | |
931 | svsk = list_entry(serv->sv_tempsocks.prev, | |
932 | struct svc_sock, | |
933 | sk_list); | |
934 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
c45c357d | 935 | atomic_inc(&svsk->sk_inuse); |
1da177e4 LT |
936 | } |
937 | spin_unlock_bh(&serv->sv_lock); | |
938 | ||
939 | if (svsk) { | |
940 | svc_sock_enqueue(svsk); | |
941 | svc_sock_put(svsk); | |
942 | } | |
943 | ||
944 | } | |
945 | ||
946 | if (serv->sv_stats) | |
947 | serv->sv_stats->nettcpconn++; | |
948 | ||
949 | return; | |
950 | ||
951 | failed: | |
952 | sock_release(newsock); | |
953 | return; | |
954 | } | |
955 | ||
956 | /* | |
957 | * Receive data from a TCP socket. | |
958 | */ | |
959 | static int | |
960 | svc_tcp_recvfrom(struct svc_rqst *rqstp) | |
961 | { | |
962 | struct svc_sock *svsk = rqstp->rq_sock; | |
963 | struct svc_serv *serv = svsk->sk_server; | |
964 | int len; | |
3cc03b16 | 965 | struct kvec *vec; |
1da177e4 LT |
966 | int pnum, vlen; |
967 | ||
968 | dprintk("svc: tcp_recv %p data %d conn %d close %d\n", | |
969 | svsk, test_bit(SK_DATA, &svsk->sk_flags), | |
970 | test_bit(SK_CONN, &svsk->sk_flags), | |
971 | test_bit(SK_CLOSE, &svsk->sk_flags)); | |
972 | ||
973 | if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) { | |
974 | svc_sock_received(svsk); | |
975 | return svc_deferred_recv(rqstp); | |
976 | } | |
977 | ||
978 | if (test_bit(SK_CLOSE, &svsk->sk_flags)) { | |
979 | svc_delete_socket(svsk); | |
980 | return 0; | |
981 | } | |
982 | ||
1a047060 | 983 | if (svsk->sk_sk->sk_state == TCP_LISTEN) { |
1da177e4 LT |
984 | svc_tcp_accept(svsk); |
985 | svc_sock_received(svsk); | |
986 | return 0; | |
987 | } | |
988 | ||
989 | if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags)) | |
990 | /* sndbuf needs to have room for one request | |
991 | * per thread, otherwise we can stall even when the | |
992 | * network isn't a bottleneck. | |
3262c816 GB |
993 | * |
994 | * We count all threads rather than threads in a | |
995 | * particular pool, which provides an upper bound | |
996 | * on the number of threads which will access the socket. | |
997 | * | |
1da177e4 LT |
998 | * rcvbuf just needs to be able to hold a few requests. |
999 | * Normally they will be removed from the queue | |
1000 | * as soon as a complete request arrives. |
1001 | */ | |
1002 | svc_sock_setbufsize(svsk->sk_sock, | |
c6b0a9f8 N |
1003 | (serv->sv_nrthreads+3) * serv->sv_max_mesg, |
1004 | 3 * serv->sv_max_mesg); | |
1da177e4 LT |
1005 | |
1006 | clear_bit(SK_DATA, &svsk->sk_flags); | |
1007 | ||
1008 | /* Receive data. If we haven't got the record length yet, get | |
1009 | * the next four bytes. Otherwise try to gobble up as much as | |
1010 | * possible up to the complete record length. | |
1011 | */ | |
1012 | if (svsk->sk_tcplen < 4) { | |
1013 | unsigned long want = 4 - svsk->sk_tcplen; | |
1014 | struct kvec iov; | |
1015 | ||
1016 | iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; | |
1017 | iov.iov_len = want; | |
1018 | if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) | |
1019 | goto error; | |
1020 | svsk->sk_tcplen += len; | |
1021 | ||
1022 | if (len < want) { | |
1023 | dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", | |
1024 | len, want); | |
1025 | svc_sock_received(svsk); | |
1026 | return -EAGAIN; /* record header not complete */ | |
1027 | } | |
1028 | ||
1029 | svsk->sk_reclen = ntohl(svsk->sk_reclen); | |
1030 | if (!(svsk->sk_reclen & 0x80000000)) { | |
1031 | /* FIXME: technically, a record can be fragmented, | |
1032 | * and non-terminal fragments will not have the top | |
1033 | * bit set in the fragment length header. | |
1034 | * But apparently no known nfs clients send fragmented | |
1035 | * records. */ | |
1036 | printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n", | |
1037 | (unsigned long) svsk->sk_reclen); | |
1038 | goto err_delete; | |
1039 | } | |
1040 | svsk->sk_reclen &= 0x7fffffff; | |
1041 | dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); | |
c6b0a9f8 | 1042 | if (svsk->sk_reclen > serv->sv_max_mesg) { |
1da177e4 LT |
1043 | printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n", |
1044 | (unsigned long) svsk->sk_reclen); | |
1045 | goto err_delete; | |
1046 | } | |
1047 | } | |
1048 | ||
1049 | /* Check whether enough data is available */ | |
1050 | len = svc_recv_available(svsk); | |
1051 | if (len < 0) | |
1052 | goto error; | |
1053 | ||
1054 | if (len < svsk->sk_reclen) { | |
1055 | dprintk("svc: incomplete TCP record (%d of %d)\n", | |
1056 | len, svsk->sk_reclen); | |
1057 | svc_sock_received(svsk); | |
1058 | return -EAGAIN; /* record not complete */ | |
1059 | } | |
1060 | len = svsk->sk_reclen; | |
1061 | set_bit(SK_DATA, &svsk->sk_flags); | |
1062 | ||
3cc03b16 | 1063 | vec = rqstp->rq_vec; |
1da177e4 LT |
1064 | vec[0] = rqstp->rq_arg.head[0]; |
1065 | vlen = PAGE_SIZE; | |
1066 | pnum = 1; | |
1067 | while (vlen < len) { | |
44524359 | 1068 | vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]); |
1da177e4 LT |
1069 | vec[pnum].iov_len = PAGE_SIZE; |
1070 | pnum++; | |
1071 | vlen += PAGE_SIZE; | |
1072 | } | |
44524359 | 1073 | rqstp->rq_respages = &rqstp->rq_pages[pnum]; |
1da177e4 LT |
1074 | |
1075 | /* Now receive data */ | |
1076 | len = svc_recvfrom(rqstp, vec, pnum, len); | |
1077 | if (len < 0) | |
1078 | goto error; | |
1079 | ||
1080 | dprintk("svc: TCP complete record (%d bytes)\n", len); | |
1081 | rqstp->rq_arg.len = len; | |
1082 | rqstp->rq_arg.page_base = 0; | |
1083 | if (len <= rqstp->rq_arg.head[0].iov_len) { | |
1084 | rqstp->rq_arg.head[0].iov_len = len; | |
1085 | rqstp->rq_arg.page_len = 0; | |
1086 | } else { | |
1087 | rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; | |
1088 | } | |
1089 | ||
1090 | rqstp->rq_skbuff = NULL; | |
1091 | rqstp->rq_prot = IPPROTO_TCP; | |
1092 | ||
1093 | /* Reset TCP read info */ | |
1094 | svsk->sk_reclen = 0; | |
1095 | svsk->sk_tcplen = 0; | |
1096 | ||
1097 | svc_sock_received(svsk); | |
1098 | if (serv->sv_stats) | |
1099 | serv->sv_stats->nettcpcnt++; | |
1100 | ||
1101 | return len; | |
1102 | ||
1103 | err_delete: | |
1104 | svc_delete_socket(svsk); | |
1105 | return -EAGAIN; | |
1106 | ||
1107 | error: | |
1108 | if (len == -EAGAIN) { | |
1109 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | |
1110 | svc_sock_received(svsk); | |
1111 | } else { | |
1112 | printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", | |
1113 | svsk->sk_server->sv_name, -len); | |
93fbf1a5 | 1114 | goto err_delete; |
1da177e4 LT |
1115 | } |
1116 | ||
1117 | return len; | |
1118 | } | |
1119 | ||
1120 | /* | |
1121 | * Send out data on TCP socket. | |
1122 | */ | |
1123 | static int | |
1124 | svc_tcp_sendto(struct svc_rqst *rqstp) | |
1125 | { | |
1126 | struct xdr_buf *xbufp = &rqstp->rq_res; | |
1127 | int sent; | |
d8ed029d | 1128 | __be32 reclen; |
1da177e4 LT |
1129 | |
1130 | /* Set up the first element of the reply kvec. | |
1131 | * Any other kvecs that may be in use have been taken | |
1132 | * care of by the server implementation itself. | |
1133 | */ | |
1134 | reclen = htonl(0x80000000|((xbufp->len ) - 4)); | |
1135 | memcpy(xbufp->head[0].iov_base, &reclen, 4); | |
1136 | ||
1137 | if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags)) | |
1138 | return -ENOTCONN; | |
1139 | ||
1140 | sent = svc_sendto(rqstp, &rqstp->rq_res); | |
1141 | if (sent != xbufp->len) { | |
1142 | printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n", | |
1143 | rqstp->rq_sock->sk_server->sv_name, | |
1144 | (sent<0)?"got error":"sent only", | |
1145 | sent, xbufp->len); | |
1146 | svc_delete_socket(rqstp->rq_sock); | |
1147 | sent = -EAGAIN; | |
1148 | } | |
1149 | return sent; | |
1150 | } | |
1151 | ||
1152 | static void | |
1153 | svc_tcp_init(struct svc_sock *svsk) | |
1154 | { | |
1155 | struct sock *sk = svsk->sk_sk; | |
1156 | struct tcp_sock *tp = tcp_sk(sk); | |
1157 | ||
1158 | svsk->sk_recvfrom = svc_tcp_recvfrom; | |
1159 | svsk->sk_sendto = svc_tcp_sendto; | |
1160 | ||
1161 | if (sk->sk_state == TCP_LISTEN) { | |
1162 | dprintk("setting up TCP socket for listening\n"); | |
1163 | sk->sk_data_ready = svc_tcp_listen_data_ready; | |
1164 | set_bit(SK_CONN, &svsk->sk_flags); | |
1165 | } else { | |
1166 | dprintk("setting up TCP socket for reading\n"); | |
1167 | sk->sk_state_change = svc_tcp_state_change; | |
1168 | sk->sk_data_ready = svc_tcp_data_ready; | |
1169 | sk->sk_write_space = svc_write_space; | |
1170 | ||
1171 | svsk->sk_reclen = 0; | |
1172 | svsk->sk_tcplen = 0; | |
1173 | ||
1174 | tp->nonagle = 1; /* disable Nagle's algorithm */ | |
1175 | ||
1176 | /* initial setting must have enough space to |
1177 | * receive and respond to one request. | |
1178 | * svc_tcp_recvfrom will re-adjust if necessary | |
1179 | */ | |
1180 | svc_sock_setbufsize(svsk->sk_sock, | |
c6b0a9f8 N |
1181 | 3 * svsk->sk_server->sv_max_mesg, |
1182 | 3 * svsk->sk_server->sv_max_mesg); | |
1da177e4 LT |
1183 | |
1184 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
1185 | set_bit(SK_DATA, &svsk->sk_flags); | |
1186 | if (sk->sk_state != TCP_ESTABLISHED) | |
1187 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
1188 | } | |
1189 | } | |
1190 | ||
1191 | void | |
1192 | svc_sock_update_bufs(struct svc_serv *serv) | |
1193 | { | |
1194 | /* | |
1195 | * The number of server threads has changed. Update | |
1196 | * rcvbuf and sndbuf accordingly on all sockets | |
1197 | */ | |
1198 | struct list_head *le; | |
1199 | ||
1200 | spin_lock_bh(&serv->sv_lock); | |
1201 | list_for_each(le, &serv->sv_permsocks) { | |
1202 | struct svc_sock *svsk = | |
1203 | list_entry(le, struct svc_sock, sk_list); | |
1204 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
1205 | } | |
1206 | list_for_each(le, &serv->sv_tempsocks) { | |
1207 | struct svc_sock *svsk = | |
1208 | list_entry(le, struct svc_sock, sk_list); | |
1209 | set_bit(SK_CHNGBUF, &svsk->sk_flags); | |
1210 | } | |
1211 | spin_unlock_bh(&serv->sv_lock); | |
1212 | } | |
1213 | ||
1214 | /* | |
3262c816 GB |
1215 | * Receive the next request on any socket. This code is carefully |
1216 | * organised not to touch any cachelines in the shared svc_serv | |
1217 | * structure, only cachelines in the local svc_pool. | |
1da177e4 LT |
1218 | */ |
1219 | int | |
6fb2b47f | 1220 | svc_recv(struct svc_rqst *rqstp, long timeout) |
1da177e4 LT |
1221 | { |
1222 | struct svc_sock *svsk =NULL; | |
6fb2b47f | 1223 | struct svc_serv *serv = rqstp->rq_server; |
3262c816 | 1224 | struct svc_pool *pool = rqstp->rq_pool; |
44524359 | 1225 | int len, i; |
1da177e4 LT |
1226 | int pages; |
1227 | struct xdr_buf *arg; | |
1228 | DECLARE_WAITQUEUE(wait, current); | |
1229 | ||
1230 | dprintk("svc: server %p waiting for data (to = %ld)\n", | |
1231 | rqstp, timeout); | |
1232 | ||
1233 | if (rqstp->rq_sock) | |
1234 | printk(KERN_ERR | |
1235 | "svc_recv: service %p, socket not NULL!\n", | |
1236 | rqstp); | |
1237 | if (waitqueue_active(&rqstp->rq_wait)) | |
1238 | printk(KERN_ERR | |
1239 | "svc_recv: service %p, wait queue active!\n", | |
1240 | rqstp); | |
1241 | ||
1da177e4 LT |
1242 | |
1243 | /* now allocate needed pages. If we get a failure, sleep briefly */ | |
c6b0a9f8 | 1244 | pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; |
44524359 N |
1245 | for (i=0; i < pages ; i++) |
1246 | while (rqstp->rq_pages[i] == NULL) { | |
1247 | struct page *p = alloc_page(GFP_KERNEL); | |
1248 | if (!p) | |
1249 | schedule_timeout_uninterruptible(msecs_to_jiffies(500)); | |
1250 | rqstp->rq_pages[i] = p; | |
1da177e4 | 1251 | } |
1da177e4 LT |
1252 | |
1253 | /* Make arg->head point to first page and arg->pages point to rest */ | |
1254 | arg = &rqstp->rq_arg; | |
44524359 | 1255 | arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); |
1da177e4 | 1256 | arg->head[0].iov_len = PAGE_SIZE; |
44524359 | 1257 | arg->pages = rqstp->rq_pages + 1; |
1da177e4 LT |
1258 | arg->page_base = 0; |
1259 | /* save at least one page for response */ | |
1260 | arg->page_len = (pages-2)*PAGE_SIZE; | |
1261 | arg->len = (pages-1)*PAGE_SIZE; | |
1262 | arg->tail[0].iov_len = 0; | |
3e1d1d28 CL |
1263 | |
1264 | try_to_freeze(); | |
1887b935 | 1265 | cond_resched(); |
1da177e4 LT |
1266 | if (signalled()) |
1267 | return -EINTR; | |
1268 | ||
3262c816 GB |
1269 | spin_lock_bh(&pool->sp_lock); |
1270 | if ((svsk = svc_sock_dequeue(pool)) != NULL) { | |
1da177e4 | 1271 | rqstp->rq_sock = svsk; |
c45c357d | 1272 | atomic_inc(&svsk->sk_inuse); |
c6b0a9f8 | 1273 | rqstp->rq_reserved = serv->sv_max_mesg; |
5685f0fa | 1274 | atomic_add(rqstp->rq_reserved, &svsk->sk_reserved); |
1da177e4 LT |
1275 | } else { |
1276 | /* No data pending. Go to sleep */ | |
3262c816 | 1277 | svc_thread_enqueue(pool, rqstp); |
1da177e4 LT |
1278 | |
1279 | /* | |
1280 | * We have to be able to interrupt this wait | |
1281 | * to bring down the daemons ... | |
1282 | */ | |
1283 | set_current_state(TASK_INTERRUPTIBLE); | |
1284 | add_wait_queue(&rqstp->rq_wait, &wait); | |
3262c816 | 1285 | spin_unlock_bh(&pool->sp_lock); |
1da177e4 LT |
1286 | |
1287 | schedule_timeout(timeout); | |
1288 | ||
3e1d1d28 | 1289 | try_to_freeze(); |
1da177e4 | 1290 | |
3262c816 | 1291 | spin_lock_bh(&pool->sp_lock); |
1da177e4 LT |
1292 | remove_wait_queue(&rqstp->rq_wait, &wait); |
1293 | ||
1294 | if (!(svsk = rqstp->rq_sock)) { | |
3262c816 GB |
1295 | svc_thread_dequeue(pool, rqstp); |
1296 | spin_unlock_bh(&pool->sp_lock); | |
1da177e4 LT |
1297 | dprintk("svc: server %p, no data yet\n", rqstp); |
1298 | return signalled()? -EINTR : -EAGAIN; | |
1299 | } | |
1300 | } | |
3262c816 | 1301 | spin_unlock_bh(&pool->sp_lock); |
1da177e4 | 1302 | |
3262c816 GB |
1303 | dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n", |
1304 | rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse)); | |
1da177e4 LT |
1305 | len = svsk->sk_recvfrom(rqstp); |
1306 | dprintk("svc: got len=%d\n", len); | |
1307 | ||
1308 | /* No data, incomplete (TCP) read, or accept() */ | |
1309 | if (len == 0 || len == -EAGAIN) { | |
1310 | rqstp->rq_res.len = 0; | |
1311 | svc_sock_release(rqstp); | |
1312 | return -EAGAIN; | |
1313 | } | |
1314 | svsk->sk_lastrecv = get_seconds(); | |
36bdfc8b | 1315 | clear_bit(SK_OLD, &svsk->sk_flags); |
1da177e4 LT |
1316 | |
1317 | rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024; | |
1318 | rqstp->rq_chandle.defer = svc_defer; | |
1319 | ||
1320 | if (serv->sv_stats) | |
1321 | serv->sv_stats->netcnt++; | |
1322 | return len; | |
1323 | } | |
1324 | ||
1325 | /* | |
1326 | * Drop request | |
1327 | */ | |
1328 | void | |
1329 | svc_drop(struct svc_rqst *rqstp) | |
1330 | { | |
1331 | dprintk("svc: socket %p dropped request\n", rqstp->rq_sock); | |
1332 | svc_sock_release(rqstp); | |
1333 | } | |
1334 | ||
1335 | /* | |
1336 | * Return reply to client. | |
1337 | */ | |
1338 | int | |
1339 | svc_send(struct svc_rqst *rqstp) | |
1340 | { | |
1341 | struct svc_sock *svsk; | |
1342 | int len; | |
1343 | struct xdr_buf *xb; | |
1344 | ||
1345 | if ((svsk = rqstp->rq_sock) == NULL) { | |
1346 | printk(KERN_WARNING "NULL socket pointer in %s:%d\n", | |
1347 | __FILE__, __LINE__); | |
1348 | return -EFAULT; | |
1349 | } | |
1350 | ||
1351 | /* release the receive skb before sending the reply */ | |
1352 | svc_release_skb(rqstp); | |
1353 | ||
1354 | /* calculate over-all length */ | |
1355 | xb = & rqstp->rq_res; | |
1356 | xb->len = xb->head[0].iov_len + | |
1357 | xb->page_len + | |
1358 | xb->tail[0].iov_len; | |
1359 | ||
57b47a53 IM |
1360 | /* Grab svsk->sk_mutex to serialize outgoing data. */ |
1361 | mutex_lock(&svsk->sk_mutex); | |
1da177e4 LT |
1362 | if (test_bit(SK_DEAD, &svsk->sk_flags)) |
1363 | len = -ENOTCONN; | |
1364 | else | |
1365 | len = svsk->sk_sendto(rqstp); | |
57b47a53 | 1366 | mutex_unlock(&svsk->sk_mutex); |
1da177e4 LT |
1367 | svc_sock_release(rqstp); |
1368 | ||
1369 | if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) | |
1370 | return 0; | |
1371 | return len; | |
1372 | } | |
1373 | ||
36bdfc8b GB |
1374 | /* |
1375 | * Timer function to close old temporary sockets, using | |
1376 | * a mark-and-sweep algorithm. | |
1377 | */ | |
1378 | static void | |
1379 | svc_age_temp_sockets(unsigned long closure) | |
1380 | { | |
1381 | struct svc_serv *serv = (struct svc_serv *)closure; | |
1382 | struct svc_sock *svsk; | |
1383 | struct list_head *le, *next; | |
1384 | LIST_HEAD(to_be_aged); | |
1385 | ||
1386 | dprintk("svc_age_temp_sockets\n"); | |
1387 | ||
1388 | if (!spin_trylock_bh(&serv->sv_lock)) { | |
1389 | /* busy, try again 1 sec later */ | |
1390 | dprintk("svc_age_temp_sockets: busy\n"); | |
1391 | mod_timer(&serv->sv_temptimer, jiffies + HZ); | |
1392 | return; | |
1393 | } | |
1394 | ||
1395 | list_for_each_safe(le, next, &serv->sv_tempsocks) { | |
1396 | svsk = list_entry(le, struct svc_sock, sk_list); | |
1397 | ||
1398 | if (!test_and_set_bit(SK_OLD, &svsk->sk_flags)) | |
1399 | continue; | |
c45c357d | 1400 | if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags)) |
36bdfc8b | 1401 | continue; |
c45c357d | 1402 | atomic_inc(&svsk->sk_inuse); |
36bdfc8b GB |
1403 | list_move(le, &to_be_aged); |
1404 | set_bit(SK_CLOSE, &svsk->sk_flags); | |
1405 | set_bit(SK_DETACHED, &svsk->sk_flags); | |
1406 | } | |
1407 | spin_unlock_bh(&serv->sv_lock); | |
1408 | ||
1409 | while (!list_empty(&to_be_aged)) { | |
1410 | le = to_be_aged.next; | |
1411 | /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */ | |
1412 | list_del_init(le); | |
1413 | svsk = list_entry(le, struct svc_sock, sk_list); | |
1414 | ||
1415 | dprintk("queuing svsk %p for closing, %lu seconds old\n", | |
1416 | svsk, get_seconds() - svsk->sk_lastrecv); | |
1417 | ||
1418 | /* a thread will dequeue and close it soon */ | |
1419 | svc_sock_enqueue(svsk); | |
1420 | svc_sock_put(svsk); | |
1421 | } | |
1422 | ||
1423 | mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); | |
1424 | } | |
1425 | ||
1da177e4 LT |
1426 | /* |
1427 | * Initialize socket for RPC use and create svc_sock struct | |
1428 | * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. | |
1429 | */ | |
1430 | static struct svc_sock * | |
1431 | svc_setup_socket(struct svc_serv *serv, struct socket *sock, | |
1432 | int *errp, int pmap_register) | |
1433 | { | |
1434 | struct svc_sock *svsk; | |
1435 | struct sock *inet; | |
1436 | ||
1437 | dprintk("svc: svc_setup_socket %p\n", sock); | |
0da974f4 | 1438 | if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { |
1da177e4 LT |
1439 | *errp = -ENOMEM; |
1440 | return NULL; | |
1441 | } | |
1da177e4 LT |
1442 | |
1443 | inet = sock->sk; | |
1444 | ||
1445 | /* Register socket with portmapper */ | |
1446 | if (*errp >= 0 && pmap_register) | |
1447 | *errp = svc_register(serv, inet->sk_protocol, | |
1448 | ntohs(inet_sk(inet)->sport)); | |
1449 | ||
1450 | if (*errp < 0) { | |
1451 | kfree(svsk); | |
1452 | return NULL; | |
1453 | } | |
1454 | ||
1455 | set_bit(SK_BUSY, &svsk->sk_flags); | |
1456 | inet->sk_user_data = svsk; | |
1457 | svsk->sk_sock = sock; | |
1458 | svsk->sk_sk = inet; | |
1459 | svsk->sk_ostate = inet->sk_state_change; | |
1460 | svsk->sk_odata = inet->sk_data_ready; | |
1461 | svsk->sk_owspace = inet->sk_write_space; | |
1462 | svsk->sk_server = serv; | |
c45c357d | 1463 | atomic_set(&svsk->sk_inuse, 0); |
1da177e4 | 1464 | svsk->sk_lastrecv = get_seconds(); |
1a68d952 | 1465 | spin_lock_init(&svsk->sk_defer_lock); |
1da177e4 LT |
1466 | INIT_LIST_HEAD(&svsk->sk_deferred); |
1467 | INIT_LIST_HEAD(&svsk->sk_ready); | |
57b47a53 | 1468 | mutex_init(&svsk->sk_mutex); |
1da177e4 LT |
1469 | |
1470 | /* Initialize the socket */ | |
1471 | if (sock->type == SOCK_DGRAM) | |
1472 | svc_udp_init(svsk); | |
1473 | else | |
1474 | svc_tcp_init(svsk); | |
1475 | ||
1476 | spin_lock_bh(&serv->sv_lock); | |
1477 | if (!pmap_register) { | |
1478 | set_bit(SK_TEMP, &svsk->sk_flags); | |
1479 | list_add(&svsk->sk_list, &serv->sv_tempsocks); | |
1480 | serv->sv_tmpcnt++; | |
36bdfc8b GB |
1481 | if (serv->sv_temptimer.function == NULL) { |
1482 | /* setup timer to age temp sockets */ | |
1483 | setup_timer(&serv->sv_temptimer, svc_age_temp_sockets, | |
1484 | (unsigned long)serv); | |
1485 | mod_timer(&serv->sv_temptimer, | |
1486 | jiffies + svc_conn_age_period * HZ); | |
1487 | } | |
1da177e4 LT |
1488 | } else { |
1489 | clear_bit(SK_TEMP, &svsk->sk_flags); | |
1490 | list_add(&svsk->sk_list, &serv->sv_permsocks); | |
1491 | } | |
1492 | spin_unlock_bh(&serv->sv_lock); | |
1493 | ||
1494 | dprintk("svc: svc_setup_socket created %p (inet %p)\n", | |
1495 | svsk, svsk->sk_sk); | |
1496 | ||
1497 | clear_bit(SK_BUSY, &svsk->sk_flags); | |
1498 | svc_sock_enqueue(svsk); | |
1499 | return svsk; | |
1500 | } | |
1501 | ||
b41b66d6 N |
1502 | int svc_addsock(struct svc_serv *serv, |
1503 | int fd, | |
1504 | char *name_return, | |
1505 | int *proto) | |
1506 | { | |
1507 | int err = 0; | |
1508 | struct socket *so = sockfd_lookup(fd, &err); | |
1509 | struct svc_sock *svsk = NULL; | |
1510 | ||
1511 | if (!so) | |
1512 | return err; | |
1513 | if (so->sk->sk_family != AF_INET) | |
1514 | err = -EAFNOSUPPORT; | |
1515 | else if (so->sk->sk_protocol != IPPROTO_TCP && | |
1516 | so->sk->sk_protocol != IPPROTO_UDP) | |
1517 | err = -EPROTONOSUPPORT; | |
1518 | else if (so->state > SS_UNCONNECTED) | |
1519 | err = -EISCONN; | |
1520 | else { | |
1521 | svsk = svc_setup_socket(serv, so, &err, 1); | |
1522 | if (svsk) | |
1523 | err = 0; | |
1524 | } | |
1525 | if (err) { | |
1526 | sockfd_put(so); | |
1527 | return err; | |
1528 | } | |
1529 | if (proto) *proto = so->sk->sk_protocol; | |
1530 | return one_sock_name(name_return, svsk); | |
1531 | } | |
1532 | EXPORT_SYMBOL_GPL(svc_addsock); | |
1533 | ||
1da177e4 LT |
1534 | /* |
1535 | * Create socket for RPC service. | |
1536 | */ | |
1537 | static int | |
1538 | svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) | |
1539 | { | |
1540 | struct svc_sock *svsk; | |
1541 | struct socket *sock; | |
1542 | int error; | |
1543 | int type; | |
1544 | ||
1545 | dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n", | |
1546 | serv->sv_program->pg_name, protocol, | |
1547 | NIPQUAD(sin->sin_addr.s_addr), | |
1548 | ntohs(sin->sin_port)); | |
1549 | ||
1550 | if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { | |
1551 | printk(KERN_WARNING "svc: only UDP and TCP " | |
1552 | "sockets supported\n"); | |
1553 | return -EINVAL; | |
1554 | } | |
1555 | type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; | |
1556 | ||
1557 | if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) | |
1558 | return error; | |
1559 | ||
18114746 ES |
1560 | if (type == SOCK_STREAM) |
1561 | sock->sk->sk_reuse = 1; /* allow address reuse */ | |
1562 | error = kernel_bind(sock, (struct sockaddr *) sin, | |
1563 | sizeof(*sin)); | |
1564 | if (error < 0) | |
1565 | goto bummer; | |
1da177e4 LT |
1566 | |
1567 | if (protocol == IPPROTO_TCP) { | |
e6242e92 | 1568 | if ((error = kernel_listen(sock, 64)) < 0) |
1da177e4 LT |
1569 | goto bummer; |
1570 | } | |
1571 | ||
1572 | if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL) | |
1573 | return 0; | |
1574 | ||
1575 | bummer: | |
1576 | dprintk("svc: svc_create_socket error = %d\n", -error); | |
1577 | sock_release(sock); | |
1578 | return error; | |
1579 | } | |
1580 | ||
1581 | /* | |
1582 | * Remove a dead socket | |
1583 | */ | |
1584 | void | |
1585 | svc_delete_socket(struct svc_sock *svsk) | |
1586 | { | |
1587 | struct svc_serv *serv; | |
1588 | struct sock *sk; | |
1589 | ||
1590 | dprintk("svc: svc_delete_socket(%p)\n", svsk); | |
1591 | ||
1592 | serv = svsk->sk_server; | |
1593 | sk = svsk->sk_sk; | |
1594 | ||
1595 | sk->sk_state_change = svsk->sk_ostate; | |
1596 | sk->sk_data_ready = svsk->sk_odata; | |
1597 | sk->sk_write_space = svsk->sk_owspace; | |
1598 | ||
1599 | spin_lock_bh(&serv->sv_lock); | |
1600 | ||
36bdfc8b GB |
1601 | if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags)) |
1602 | list_del_init(&svsk->sk_list); | |
3262c816 GB |
1603 | /* |
1604 | * We used to delete the svc_sock from whichever list | |
1605 | * its sk_ready node was on, but we don't actually |
1606 | * need to. This is because the only time we're called | |
1607 | * while still attached to a queue, the queue itself | |
1608 | * is about to be destroyed (in svc_destroy). | |
1609 | */ | |
1da177e4 LT |
1610 | if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) |
1611 | if (test_bit(SK_TEMP, &svsk->sk_flags)) | |
1612 | serv->sv_tmpcnt--; | |
1613 | ||
d6740df9 NB |
1614 | /* This atomic_inc should not be needed - svc_delete_socket
1615 | * should have the semantic of dropping a reference. | |
1616 | * But it doesn't yet.... | |
1617 | */ | |
1618 | atomic_inc(&svsk->sk_inuse); | |
1619 | spin_unlock_bh(&serv->sv_lock); | |
1620 | svc_sock_put(svsk); | |
1da177e4 LT |
1621 | } |
1622 | ||
1623 | /* | |
1624 | * Make a socket for nfsd and lockd | |
1625 | */ | |
1626 | int | |
1627 | svc_makesock(struct svc_serv *serv, int protocol, unsigned short port) | |
1628 | { | |
1629 | struct sockaddr_in sin; | |
1630 | ||
1631 | dprintk("svc: creating socket proto = %d\n", protocol); | |
1632 | sin.sin_family = AF_INET; | |
1633 | sin.sin_addr.s_addr = INADDR_ANY; | |
1634 | sin.sin_port = htons(port); | |
1635 | return svc_create_socket(serv, protocol, &sin); | |
1636 | } | |
1637 | ||
1638 | /* | |
1639 | * Handle defer and revisit of requests | |
1640 | */ | |
1641 | ||
1642 | static void svc_revisit(struct cache_deferred_req *dreq, int too_many) | |
1643 | { | |
1644 | struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); | |
1da177e4 LT |
1645 | struct svc_sock *svsk; |
1646 | ||
1647 | if (too_many) { | |
1648 | svc_sock_put(dr->svsk); | |
1649 | kfree(dr); | |
1650 | return; | |
1651 | } | |
1652 | dprintk("revisit queued\n"); | |
1653 | svsk = dr->svsk; | |
1654 | dr->svsk = NULL; | |
1a68d952 | 1655 | spin_lock_bh(&svsk->sk_defer_lock); |
1da177e4 | 1656 | list_add(&dr->handle.recent, &svsk->sk_deferred); |
1a68d952 | 1657 | spin_unlock_bh(&svsk->sk_defer_lock); |
1da177e4 LT |
1658 | set_bit(SK_DEFERRED, &svsk->sk_flags); |
1659 | svc_sock_enqueue(svsk); | |
1660 | svc_sock_put(svsk); | |
1661 | } | |
1662 | ||
1663 | static struct cache_deferred_req * | |
1664 | svc_defer(struct cache_req *req) | |
1665 | { | |
1666 | struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); | |
1667 | int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len); | |
1668 | struct svc_deferred_req *dr; | |
1669 | ||
1670 | if (rqstp->rq_arg.page_len) | |
1671 | return NULL; /* if more than a page, give up FIXME */ | |
1672 | if (rqstp->rq_deferred) { | |
1673 | dr = rqstp->rq_deferred; | |
1674 | rqstp->rq_deferred = NULL; | |
1675 | } else { | |
1676 | int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; | |
1677 | /* FIXME maybe discard if size too large */ | |
1678 | dr = kmalloc(size, GFP_KERNEL); | |
1679 | if (dr == NULL) | |
1680 | return NULL; | |
1681 | ||
1682 | dr->handle.owner = rqstp->rq_server; | |
1683 | dr->prot = rqstp->rq_prot; | |
1684 | dr->addr = rqstp->rq_addr; | |
1918e341 | 1685 | dr->daddr = rqstp->rq_daddr; |
1da177e4 LT |
1686 | dr->argslen = rqstp->rq_arg.len >> 2; |
1687 | memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); | |
1688 | } | |
c45c357d | 1689 | atomic_inc(&rqstp->rq_sock->sk_inuse); |
1da177e4 | 1690 | dr->svsk = rqstp->rq_sock; |
1da177e4 LT |
1691 | |
1692 | dr->handle.revisit = svc_revisit; | |
1693 | return &dr->handle; | |
1694 | } | |
1695 | ||
1696 | /* | |
1697 | * recv data from a deferred request into an active one | |
1698 | */ | |
1699 | static int svc_deferred_recv(struct svc_rqst *rqstp) | |
1700 | { | |
1701 | struct svc_deferred_req *dr = rqstp->rq_deferred; | |
1702 | ||
1703 | rqstp->rq_arg.head[0].iov_base = dr->args; | |
1704 | rqstp->rq_arg.head[0].iov_len = dr->argslen<<2; | |
1705 | rqstp->rq_arg.page_len = 0; | |
1706 | rqstp->rq_arg.len = dr->argslen<<2; | |
1707 | rqstp->rq_prot = dr->prot; | |
1708 | rqstp->rq_addr = dr->addr; | |
1918e341 | 1709 | rqstp->rq_daddr = dr->daddr; |
44524359 | 1710 | rqstp->rq_respages = rqstp->rq_pages; |
1da177e4 LT |
1711 | return dr->argslen<<2; |
1712 | } | |
1713 | ||
1714 | ||
1715 | static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) | |
1716 | { | |
1717 | struct svc_deferred_req *dr = NULL; | |
1da177e4 LT |
1718 | |
1719 | if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) | |
1720 | return NULL; | |
1a68d952 | 1721 | spin_lock_bh(&svsk->sk_defer_lock); |
1da177e4 LT |
1722 | clear_bit(SK_DEFERRED, &svsk->sk_flags); |
1723 | if (!list_empty(&svsk->sk_deferred)) { | |
1724 | dr = list_entry(svsk->sk_deferred.next, | |
1725 | struct svc_deferred_req, | |
1726 | handle.recent); | |
1727 | list_del_init(&dr->handle.recent); | |
1728 | set_bit(SK_DEFERRED, &svsk->sk_flags); | |
1729 | } | |
1a68d952 | 1730 | spin_unlock_bh(&svsk->sk_defer_lock); |
1da177e4 LT |
1731 | return dr; |
1732 | } |