net/core/sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
160 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
161 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
162 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
163 "sk_lock-AF_NFC" , "sk_lock-AF_MAX"
164 };
165 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
166 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
167 "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
168 "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
169 "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
170 "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
171 "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
172 "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
173 "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,
174 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
175 "slock-27" , "slock-28" , "slock-AF_CAN" ,
176 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
177 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
178 "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
179 "slock-AF_NFC" , "slock-AF_MAX"
180 };
181 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
182 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
183 "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
184 "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
185 "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
186 "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
187 "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
188 "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
189 "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,
190 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
191 "clock-27" , "clock-28" , "clock-AF_CAN" ,
192 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
193 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
194 "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
195 "clock-AF_NFC" , "clock-AF_MAX"
196 };
197
198 /*
199 * sk_callback_lock locking rules are per-address-family,
200 * so split the lock classes by using a per-AF key:
201 */
202 static struct lock_class_key af_callback_keys[AF_MAX];
203
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
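
/*
 * Illustrative sizing (editor's note, not part of the original source):
 * the defaults budget for 256 packets of 256 bytes of payload each, where
 * SKB_TRUESIZE() folds in the per-skb metadata overhead, so the effective
 * default is comfortably above the raw 256 * 256 = 65536 bytes of payload.
 */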

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
EXPORT_SYMBOL_GPL(net_cls_subsys_id);
#endif

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
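
/*
 * Worked example (editor's note, not part of the original source): with
 * HZ == 1000, a request of { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2*1000 + (500000 + 999)/1000 = 2500 jiffies; the rounding term makes
 * any fractional tick count as a whole tick.
 */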

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the RCU-protected region, so make sure we
	 * don't leak a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
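
/*
 * Illustrative caller pattern (editor's sketch, not part of the original
 * source): sock_queue_rcv_skb() does not free the skb on failure, so
 * protocol receive handlers typically drop it themselves, e.g.
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */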

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
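
/*
 * Illustrative user-space usage (editor's sketch, not part of the original
 * source): binding a socket to one interface requires CAP_NET_RAW, and an
 * empty name removes the binding, e.g.
 *
 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0",
 *		   strlen("eth0") + 1);
 */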

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't return an error on this; BSD doesn't, and if you
		   think about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error on this; BSD doesn't, and if you
		   think about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

	/* We implement the SO_SNDLOWAT etc. options as not settable
	   (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
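
/*
 * Illustrative user-space usage (editor's sketch, not part of the original
 * source): SO_SNDBUF/SO_RCVBUF requests are doubled to cover struct
 * sk_buff overhead, so getsockopt() reports twice the value set (after
 * clamping against sysctl_wmem_max/sysctl_rmem_max), e.g.
 *
 *	int val = 65536;
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
 *
 * after which getsockopt(SO_SNDBUF) reports up to 131072.
 */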


void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
		ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
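
/*
 * Illustrative user-space usage (editor's sketch, not part of the original
 * source): SO_ERROR both returns and clears the pending error, the usual
 * way to resolve a non-blocking connect(), e.g.
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *
 * Note in the switch above that sock_error() consumes sk_err, and a soft
 * error is reported only when no hard error is pending.
 */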

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
	if (offsetof(struct sock, sk_node.next) != 0)
		memset(sk, 0, offsetof(struct sock, sk_node.next));
	memset(&sk->sk_node.pprev, 0,
	       size - offsetof(struct sock, sk_node.pprev));
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#ifdef CONFIG_CGROUPS
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	rcu_read_lock();  /* doing current task, which cannot vanish. */
	classid = task_cls_classid(current);
	rcu_read_unlock();
	if (classid && classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * Last sock_put should drop reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking reference to stopping namespace
 * is not an option.
 * Take reference to a socket to remove it from hash _alive_ and after that
 * destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
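
/*
 * Illustrative pairing (editor's sketch, not part of the original source):
 * option memory is charged against sk_omem_alloc and bounded by
 * sysctl_optmem_max, and the caller must pass the same size back, e.g.
 *
 *	buf = sock_kmalloc(sk, size, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		sock_kfree_s(sk, buf, size);
 *	}
 */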

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					__skb_fill_page_desc(skb, i,
							page, 0,
							(data_len >= PAGE_SIZE ?
							 PAGE_SIZE :
							 data_len));
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
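
/*
 * Illustrative caller pattern (editor's sketch, not part of the original
 * source): a protocol's sendmsg() typically allocates from the send buffer
 * with blocking behaviour derived from MSG_DONTWAIT, e.g.
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (skb == NULL)
 *		goto out;
 */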

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
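
/*
 * Illustrative caller pattern (editor's simplified sketch, not part of
 * the original source; real callers also handle signals and errors):
 *
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo)
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 */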

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_long_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_long_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
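
/*
 * Worked example (editor's note, not part of the original source):
 * assuming SK_MEM_QUANTUM is PAGE_SIZE (4096 on most platforms), charging
 * 6000 bytes gives amt = sk_mem_pages(6000) = 2, so sk_forward_alloc grows
 * by 8192 while memory_allocated grows by 2 quanta; the unused surplus is
 * handed back by __sk_mem_reclaim() below.
 */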

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
1751 void __sk_mem_reclaim(struct sock *sk)
1752 {
1753 struct proto *prot = sk->sk_prot;
1754
1755 atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
1756 prot->memory_allocated);
1757 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1758
1759 if (prot->memory_pressure && *prot->memory_pressure &&
1760 (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1761 *prot->memory_pressure = 0;
1762 }
1763 EXPORT_SYMBOL(__sk_mem_reclaim);
1764
1765
1766 /*
1767 * Set of default routines for initialising struct proto_ops when
1768 * the protocol does not support a particular function. In certain
1769 * cases where it makes no sense for a protocol to have a "do nothing"
1770 * function, some default processing is provided.
1771 */
1772
1773 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1774 {
1775 return -EOPNOTSUPP;
1776 }
1777 EXPORT_SYMBOL(sock_no_bind);
1778
1779 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
1780 int len, int flags)
1781 {
1782 return -EOPNOTSUPP;
1783 }
1784 EXPORT_SYMBOL(sock_no_connect);
1785
1786 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1787 {
1788 return -EOPNOTSUPP;
1789 }
1790 EXPORT_SYMBOL(sock_no_socketpair);
1791
1792 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1793 {
1794 return -EOPNOTSUPP;
1795 }
1796 EXPORT_SYMBOL(sock_no_accept);
1797
1798 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
1799 int *len, int peer)
1800 {
1801 return -EOPNOTSUPP;
1802 }
1803 EXPORT_SYMBOL(sock_no_getname);
1804
1805 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
1806 {
1807 return 0;
1808 }
1809 EXPORT_SYMBOL(sock_no_poll);
1810
1811 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1812 {
1813 return -EOPNOTSUPP;
1814 }
1815 EXPORT_SYMBOL(sock_no_ioctl);
1816
1817 int sock_no_listen(struct socket *sock, int backlog)
1818 {
1819 return -EOPNOTSUPP;
1820 }
1821 EXPORT_SYMBOL(sock_no_listen);
1822
1823 int sock_no_shutdown(struct socket *sock, int how)
1824 {
1825 return -EOPNOTSUPP;
1826 }
1827 EXPORT_SYMBOL(sock_no_shutdown);
1828
1829 int sock_no_setsockopt(struct socket *sock, int level, int optname,
1830 char __user *optval, unsigned int optlen)
1831 {
1832 return -EOPNOTSUPP;
1833 }
1834 EXPORT_SYMBOL(sock_no_setsockopt);
1835
1836 int sock_no_getsockopt(struct socket *sock, int level, int optname,
1837 char __user *optval, int __user *optlen)
1838 {
1839 return -EOPNOTSUPP;
1840 }
1841 EXPORT_SYMBOL(sock_no_getsockopt);
1842
1843 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1844 size_t len)
1845 {
1846 return -EOPNOTSUPP;
1847 }
1848 EXPORT_SYMBOL(sock_no_sendmsg);
1849
1850 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1851 size_t len, int flags)
1852 {
1853 return -EOPNOTSUPP;
1854 }
1855 EXPORT_SYMBOL(sock_no_recvmsg);
1856
1857 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1858 {
1859 /* Mirror missing mmap method error code */
1860 return -ENODEV;
1861 }
1862 EXPORT_SYMBOL(sock_no_mmap);
1863
1864 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1865 {
1866 ssize_t res;
1867 struct msghdr msg = {.msg_flags = flags};
1868 struct kvec iov;
1869 char *kaddr = kmap(page);
1870 iov.iov_base = kaddr + offset;
1871 iov.iov_len = size;
1872 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1873 kunmap(page);
1874 return res;
1875 }
1876 EXPORT_SYMBOL(sock_no_sendpage);
1877
1878 /*
1879 * Default Socket Callbacks
1880 */
1881
1882 static void sock_def_wakeup(struct sock *sk)
1883 {
1884 struct socket_wq *wq;
1885
1886 rcu_read_lock();
1887 wq = rcu_dereference(sk->sk_wq);
1888 if (wq_has_sleeper(wq))
1889 wake_up_interruptible_all(&wq->wait);
1890 rcu_read_unlock();
1891 }
1892
1893 static void sock_def_error_report(struct sock *sk)
1894 {
1895 struct socket_wq *wq;
1896
1897 rcu_read_lock();
1898 wq = rcu_dereference(sk->sk_wq);
1899 if (wq_has_sleeper(wq))
1900 wake_up_interruptible_poll(&wq->wait, POLLERR);
1901 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
1902 rcu_read_unlock();
1903 }
1904
1905 static void sock_def_readable(struct sock *sk, int len)
1906 {
1907 struct socket_wq *wq;
1908
1909 rcu_read_lock();
1910 wq = rcu_dereference(sk->sk_wq);
1911 if (wq_has_sleeper(wq))
1912 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
1913 POLLRDNORM | POLLRDBAND);
1914 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1915 rcu_read_unlock();
1916 }
1917
1918 static void sock_def_write_space(struct sock *sk)
1919 {
1920 struct socket_wq *wq;
1921
1922 rcu_read_lock();
1923
1924 /* Do not wake up a writer until he can make "significant"
1925 * progress. --DaveM
1926 */
1927 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1928 wq = rcu_dereference(sk->sk_wq);
1929 if (wq_has_sleeper(wq))
1930 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1931 POLLWRNORM | POLLWRBAND);
1932
1933 /* Should agree with poll, otherwise some programs break */
1934 if (sock_writeable(sk))
1935 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1936 }
1937
1938 rcu_read_unlock();
1939 }
1940
1941 static void sock_def_destruct(struct sock *sk)
1942 {
1943 kfree(sk->sk_protinfo);
1944 }
1945
1946 void sk_send_sigurg(struct sock *sk)
1947 {
1948 if (sk->sk_socket && sk->sk_socket->file)
1949 if (send_sigurg(&sk->sk_socket->file->f_owner))
1950 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
1951 }
1952 EXPORT_SYMBOL(sk_send_sigurg);
1953
1954 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1955 unsigned long expires)
1956 {
1957 if (!mod_timer(timer, expires))
1958 sock_hold(sk);
1959 }
1960 EXPORT_SYMBOL(sk_reset_timer);
1961
1962 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
1963 {
1964 if (timer_pending(timer) && del_timer(timer))
1965 __sock_put(sk);
1966 }
1967 EXPORT_SYMBOL(sk_stop_timer);
1968
1969 void sock_init_data(struct socket *sock, struct sock *sk)
1970 {
1971 skb_queue_head_init(&sk->sk_receive_queue);
1972 skb_queue_head_init(&sk->sk_write_queue);
1973 skb_queue_head_init(&sk->sk_error_queue);
1974 #ifdef CONFIG_NET_DMA
1975 skb_queue_head_init(&sk->sk_async_wait_queue);
1976 #endif
1977
1978 sk->sk_send_head = NULL;
1979
1980 init_timer(&sk->sk_timer);
1981
1982 sk->sk_allocation = GFP_KERNEL;
1983 sk->sk_rcvbuf = sysctl_rmem_default;
1984 sk->sk_sndbuf = sysctl_wmem_default;
1985 sk->sk_state = TCP_CLOSE;
1986 sk_set_socket(sk, sock);
1987
1988 sock_set_flag(sk, SOCK_ZAPPED);
1989
1990 if (sock) {
1991 sk->sk_type = sock->type;
1992 sk->sk_wq = sock->wq;
1993 sock->sk = sk;
1994 } else
1995 sk->sk_wq = NULL;
1996
1997 spin_lock_init(&sk->sk_dst_lock);
1998 rwlock_init(&sk->sk_callback_lock);
1999 lockdep_set_class_and_name(&sk->sk_callback_lock,
2000 af_callback_keys + sk->sk_family,
2001 af_family_clock_key_strings[sk->sk_family]);
2002
2003 sk->sk_state_change = sock_def_wakeup;
2004 sk->sk_data_ready = sock_def_readable;
2005 sk->sk_write_space = sock_def_write_space;
2006 sk->sk_error_report = sock_def_error_report;
2007 sk->sk_destruct = sock_def_destruct;
2008
2009 sk->sk_sndmsg_page = NULL;
2010 sk->sk_sndmsg_off = 0;
2011
2012 sk->sk_peer_pid = NULL;
2013 sk->sk_peer_cred = NULL;
2014 sk->sk_write_pending = 0;
2015 sk->sk_rcvlowat = 1;
2016 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2017 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2018
2019 sk->sk_stamp = ktime_set(-1L, 0);
2020
2021 /*
2022 * Before updating sk_refcnt, we must commit prior changes to memory
2023 * (see Documentation/RCU/rculist_nulls.txt for details)
2024 */
2025 smp_wmb();
2026 atomic_set(&sk->sk_refcnt, 1);
2027 atomic_set(&sk->sk_drops, 0);
2028 }
2029 EXPORT_SYMBOL(sock_init_data);
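/*
 * Typical caller pattern (an illustrative sketch; AF_FOO, foo_proto and
 * foo_destruct are hypothetical): a protocol family's create() routine
 * allocates the sock, lets sock_init_data() establish the defaults set
 * above, then overrides whatever it needs.
 *
 *	static int foo_create(struct net *net, struct socket *sock,
 *			      int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, AF_FOO, GFP_KERNEL, &foo_proto);
 *		if (!sk)
 *			return -ENOMEM;
 *		sock_init_data(sock, sk);
 *		sk->sk_destruct = foo_destruct;
 *		return 0;
 *	}
 */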
2030
2031 void lock_sock_nested(struct sock *sk, int subclass)
2032 {
2033 might_sleep();
2034 spin_lock_bh(&sk->sk_lock.slock);
2035 if (sk->sk_lock.owned)
2036 __lock_sock(sk);
2037 sk->sk_lock.owned = 1;
2038 spin_unlock(&sk->sk_lock.slock);
2039 /*
2040 * The sk_lock has mutex_lock() semantics here:
2041 */
2042 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2043 local_bh_enable();
2044 }
2045 EXPORT_SYMBOL(lock_sock_nested);
2046
2047 void release_sock(struct sock *sk)
2048 {
2049 /*
2050 * The sk_lock has mutex_unlock() semantics:
2051 */
2052 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2053
2054 spin_lock_bh(&sk->sk_lock.slock);
2055 if (sk->sk_backlog.tail)
2056 __release_sock(sk);
2057 sk->sk_lock.owned = 0;
2058 if (waitqueue_active(&sk->sk_lock.wq))
2059 wake_up(&sk->sk_lock.wq);
2060 spin_unlock_bh(&sk->sk_lock.slock);
2061 }
2062 EXPORT_SYMBOL(release_sock);
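/*
 * Usage sketch: lock_sock() (which expands to lock_sock_nested(sk, 0))
 * and release_sock() bracket process-context critical sections with
 * mutex-like semantics. Packets that arrive from softirq context while
 * the socket is owned are queued on sk->sk_backlog and replayed by
 * __release_sock() on the way out:
 *
 *	lock_sock(sk);
 *	... modify protocol state, walk queues, may sleep ...
 *	release_sock(sk);
 */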
2063
2064 /**
2065 * lock_sock_fast - fast version of lock_sock
2066 * @sk: socket
2067 *
2068 * This version should be used for very small sections, where the process
2069 * won't block. Returns false if the fast path was taken:
2070 * sk_lock.slock locked, owned = 0, BH disabled
2071 * and true if the slow path was taken:
2072 * sk_lock.slock unlocked, owned = 1, BH enabled
2073 */
2074 bool lock_sock_fast(struct sock *sk)
2075 {
2076 might_sleep();
2077 spin_lock_bh(&sk->sk_lock.slock);
2078
2079 if (!sk->sk_lock.owned)
2080 /*
2081 * Fast path: return with sk_lock.slock held and BH disabled.
2082 */
2083 return false;
2084
2085 __lock_sock(sk);
2086 sk->sk_lock.owned = 1;
2087 spin_unlock(&sk->sk_lock.slock);
2088 /*
2089 * The sk_lock has mutex_lock() semantics here:
2090 */
2091 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2092 local_bh_enable();
2093 return true;
2094 }
2095 EXPORT_SYMBOL(lock_sock_fast);
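/*
 * lock_sock_fast() must be paired with unlock_sock_fast(), which takes
 * the returned value so that the matching unlock path is used (sketch):
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short, non-blocking section ...
 *	unlock_sock_fast(sk, slow);
 */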
2096
2097 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2098 {
2099 struct timeval tv;
2100 if (!sock_flag(sk, SOCK_TIMESTAMP))
2101 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2102 tv = ktime_to_timeval(sk->sk_stamp);
2103 if (tv.tv_sec == -1)
2104 return -ENOENT;
2105 if (tv.tv_sec == 0) {
2106 sk->sk_stamp = ktime_get_real();
2107 tv = ktime_to_timeval(sk->sk_stamp);
2108 }
2109 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2110 }
2111 EXPORT_SYMBOL(sock_get_timestamp);
2112
2113 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2114 {
2115 struct timespec ts;
2116 if (!sock_flag(sk, SOCK_TIMESTAMP))
2117 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2118 ts = ktime_to_timespec(sk->sk_stamp);
2119 if (ts.tv_sec == -1)
2120 return -ENOENT;
2121 if (ts.tv_sec == 0) {
2122 sk->sk_stamp = ktime_get_real();
2123 ts = ktime_to_timespec(sk->sk_stamp);
2124 }
2125 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2126 }
2127 EXPORT_SYMBOL(sock_get_timestampns);
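/*
 * These two helpers back the SIOCGSTAMP and SIOCGSTAMPNS ioctls. From
 * user space (an illustrative sketch, needing <sys/ioctl.h> and
 * <linux/sockios.h>) a query looks like:
 *
 *	struct timeval tv;
 *
 *	recv(fd, buf, len, 0);
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("rx at %ld.%06ld\n", tv.tv_sec, tv.tv_usec);
 *
 * Note the lazy enable above: the first query switches SOCK_TIMESTAMP
 * on, so the very first timestamp may reflect the query time rather
 * than the packet's arrival time.
 */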
2128
2129 void sock_enable_timestamp(struct sock *sk, int flag)
2130 {
2131 if (!sock_flag(sk, flag)) {
2132 sock_set_flag(sk, flag);
2133 /*
2134 * We just set one of the two flags which require net
2135 * time stamping, but time stamping might already have
2136 * been enabled because of the other one.
2137 */
2138 if (!sock_flag(sk,
2139 flag == SOCK_TIMESTAMP ?
2140 SOCK_TIMESTAMPING_RX_SOFTWARE :
2141 SOCK_TIMESTAMP))
2142 net_enable_timestamp();
2143 }
2144 }
2145
2146 /*
2147 * Get a socket option on a socket.
2148 *
2149 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2150 * asynchronous errors should be reported by getsockopt. We assume
2151 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2152 */
2153 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2154 char __user *optval, int __user *optlen)
2155 {
2156 struct sock *sk = sock->sk;
2157
2158 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2159 }
2160 EXPORT_SYMBOL(sock_common_getsockopt);
2161
2162 #ifdef CONFIG_COMPAT
2163 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2164 char __user *optval, int __user *optlen)
2165 {
2166 struct sock *sk = sock->sk;
2167
2168 if (sk->sk_prot->compat_getsockopt != NULL)
2169 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2170 optval, optlen);
2171 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2172 }
2173 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2174 #endif
2175
2176 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2177 struct msghdr *msg, size_t size, int flags)
2178 {
2179 struct sock *sk = sock->sk;
2180 int addr_len = 0;
2181 int err;
2182
2183 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2184 flags & ~MSG_DONTWAIT, &addr_len);
2185 if (err >= 0)
2186 msg->msg_namelen = addr_len;
2187 return err;
2188 }
2189 EXPORT_SYMBOL(sock_common_recvmsg);
2190
2191 /*
2192 * Set socket options on a socket.
2193 */
2194 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2195 char __user *optval, unsigned int optlen)
2196 {
2197 struct sock *sk = sock->sk;
2198
2199 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2200 }
2201 EXPORT_SYMBOL(sock_common_setsockopt);
2202
2203 #ifdef CONFIG_COMPAT
2204 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2205 char __user *optval, unsigned int optlen)
2206 {
2207 struct sock *sk = sock->sk;
2208
2209 if (sk->sk_prot->compat_setsockopt != NULL)
2210 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2211 optval, optlen);
2212 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2213 }
2214 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2215 #endif
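/*
 * Protocols that need no per-option interception wire these common
 * helpers directly into their proto_ops; TCP's inet_stream_ops, for
 * example, uses sock_common_setsockopt/sock_common_getsockopt this
 * way. A hypothetical sketch:
 *
 *	static const struct proto_ops foo_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.recvmsg	= sock_common_recvmsg,
 *		...
 *	};
 */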
2216
2217 void sk_common_release(struct sock *sk)
2218 {
2219 if (sk->sk_prot->destroy)
2220 sk->sk_prot->destroy(sk);
2221
2222 /*
2223 * Observation: when sk_common_release() is called, processes no
2224 * longer have access to the socket, but the network stack still does.
2225 * Step one, detach it from networking:
2226 *
2227 * A. Remove from hash tables.
2228 */
2229
2230 sk->sk_prot->unhash(sk);
2231
2232 /*
2233 * At this point the socket cannot receive new packets, but packets may
2234 * still be in flight: another CPU may have done its hash table lookup
2235 * before we unhashed the socket. Those packets will reach the receive
2236 * queue and be purged by the socket destructor.
2237 *
2238 * We also still have packets pending on the receive queue and, probably,
2239 * our own packets waiting in device queues. sock_destroy will drain the
2240 * receive queue, but transmitted packets will delay socket destruction
2241 * until the last reference is released.
2242 */
2243
2244 sock_orphan(sk);
2245
2246 xfrm_sk_free_policy(sk);
2247
2248 sk_refcnt_debug_release(sk);
2249 sock_put(sk);
2250 }
2251 EXPORT_SYMBOL(sk_common_release);
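/*
 * Typical call site (foo_close is hypothetical; raw IPv4 sockets, for
 * instance, release this way from raw_close()): a protocol's close
 * handler performs its own teardown and then hands the generic part to
 * sk_common_release(), which unhashes, orphans and drops the final
 * reference.
 *
 *	static void foo_close(struct sock *sk, long timeout)
 *	{
 *		... protocol-specific teardown ...
 *		sk_common_release(sk);
 *	}
 */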
2252
2253 static DEFINE_RWLOCK(proto_list_lock);
2254 static LIST_HEAD(proto_list);
2255
2256 #ifdef CONFIG_PROC_FS
2257 #define PROTO_INUSE_NR	64	/* should be enough for now */
2258 struct prot_inuse {
2259 int val[PROTO_INUSE_NR];
2260 };
2261
2262 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2263
2264 #ifdef CONFIG_NET_NS
2265 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2266 {
2267 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2268 }
2269 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2270
2271 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2272 {
2273 int cpu, idx = prot->inuse_idx;
2274 int res = 0;
2275
2276 for_each_possible_cpu(cpu)
2277 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2278
2279 return res >= 0 ? res : 0;
2280 }
2281 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2282
2283 static int __net_init sock_inuse_init_net(struct net *net)
2284 {
2285 net->core.inuse = alloc_percpu(struct prot_inuse);
2286 return net->core.inuse ? 0 : -ENOMEM;
2287 }
2288
2289 static void __net_exit sock_inuse_exit_net(struct net *net)
2290 {
2291 free_percpu(net->core.inuse);
2292 }
2293
2294 static struct pernet_operations net_inuse_ops = {
2295 .init = sock_inuse_init_net,
2296 .exit = sock_inuse_exit_net,
2297 };
2298
2299 static __init int net_inuse_init(void)
2300 {
2301 if (register_pernet_subsys(&net_inuse_ops))
2302 panic("Cannot initialize net inuse counters");
2303
2304 return 0;
2305 }
2306
2307 core_initcall(net_inuse_init);
2308 #else
2309 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2310
2311 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2312 {
2313 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2314 }
2315 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2316
2317 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2318 {
2319 int cpu, idx = prot->inuse_idx;
2320 int res = 0;
2321
2322 for_each_possible_cpu(cpu)
2323 res += per_cpu(prot_inuse, cpu).val[idx];
2324
2325 return res >= 0 ? res : 0;
2326 }
2327 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2328 #endif
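/*
 * Protocols feed these counters from their hash/unhash callbacks; this
 * is what makes the "sockets" column of /proc/net/protocols meaningful.
 * A sketch (foo_hash is hypothetical; udp_lib_unhash(), for example,
 * decrements with val == -1 in the same fashion):
 *
 *	static void foo_hash(struct sock *sk)
 *	{
 *		... insert sk into the protocol's lookup table ...
 *		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 *	}
 */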
2329
2330 static void assign_proto_idx(struct proto *prot)
2331 {
2332 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2333
2334 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2335 printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
2336 return;
2337 }
2338
2339 set_bit(prot->inuse_idx, proto_inuse_idx);
2340 }
2341
2342 static void release_proto_idx(struct proto *prot)
2343 {
2344 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2345 clear_bit(prot->inuse_idx, proto_inuse_idx);
2346 }
2347 #else
2348 static inline void assign_proto_idx(struct proto *prot)
2349 {
2350 }
2351
2352 static inline void release_proto_idx(struct proto *prot)
2353 {
2354 }
2355 #endif
2356
2357 int proto_register(struct proto *prot, int alloc_slab)
2358 {
2359 if (alloc_slab) {
2360 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2361 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2362 NULL);
2363
2364 if (prot->slab == NULL) {
2365 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
2366 prot->name);
2367 goto out;
2368 }
2369
2370 if (prot->rsk_prot != NULL) {
2371 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2372 if (prot->rsk_prot->slab_name == NULL)
2373 goto out_free_sock_slab;
2374
2375 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2376 prot->rsk_prot->obj_size, 0,
2377 SLAB_HWCACHE_ALIGN, NULL);
2378
2379 if (prot->rsk_prot->slab == NULL) {
2380 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
2381 prot->name);
2382 goto out_free_request_sock_slab_name;
2383 }
2384 }
2385
2386 if (prot->twsk_prot != NULL) {
2387 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2388
2389 if (prot->twsk_prot->twsk_slab_name == NULL)
2390 goto out_free_request_sock_slab;
2391
2392 prot->twsk_prot->twsk_slab =
2393 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2394 prot->twsk_prot->twsk_obj_size,
2395 0,
2396 SLAB_HWCACHE_ALIGN |
2397 prot->slab_flags,
2398 NULL);
2399 if (prot->twsk_prot->twsk_slab == NULL)
2400 goto out_free_timewait_sock_slab_name;
2401 }
2402 }
2403
2404 write_lock(&proto_list_lock);
2405 list_add(&prot->node, &proto_list);
2406 assign_proto_idx(prot);
2407 write_unlock(&proto_list_lock);
2408 return 0;
2409
2410 out_free_timewait_sock_slab_name:
2411 kfree(prot->twsk_prot->twsk_slab_name);
2412 out_free_request_sock_slab:
2413 if (prot->rsk_prot && prot->rsk_prot->slab) {
2414 kmem_cache_destroy(prot->rsk_prot->slab);
2415 prot->rsk_prot->slab = NULL;
2416 }
2417 out_free_request_sock_slab_name:
2418 if (prot->rsk_prot)
2419 kfree(prot->rsk_prot->slab_name);
2420 out_free_sock_slab:
2421 kmem_cache_destroy(prot->slab);
2422 prot->slab = NULL;
2423 out:
2424 return -ENOBUFS;
2425 }
2426 EXPORT_SYMBOL(proto_register);
2427
2428 void proto_unregister(struct proto *prot)
2429 {
2430 write_lock(&proto_list_lock);
2431 release_proto_idx(prot);
2432 list_del(&prot->node);
2433 write_unlock(&proto_list_lock);
2434
2435 if (prot->slab != NULL) {
2436 kmem_cache_destroy(prot->slab);
2437 prot->slab = NULL;
2438 }
2439
2440 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2441 kmem_cache_destroy(prot->rsk_prot->slab);
2442 kfree(prot->rsk_prot->slab_name);
2443 prot->rsk_prot->slab = NULL;
2444 }
2445
2446 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2447 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2448 kfree(prot->twsk_prot->twsk_slab_name);
2449 prot->twsk_prot->twsk_slab = NULL;
2450 }
2451 }
2452 EXPORT_SYMBOL(proto_unregister);
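/*
 * Registration sketch for a protocol module (foo_proto, struct foo_sock
 * and the module hooks are hypothetical). Passing alloc_slab == 1 makes
 * proto_register() create the kmem cache later used by sk_alloc():
 *
 *	static struct proto foo_proto = {
 *		.name		= "FOO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct foo_sock),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return proto_register(&foo_proto, 1);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		proto_unregister(&foo_proto);
 *	}
 */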
2453
2454 #ifdef CONFIG_PROC_FS
2455 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2456 __acquires(proto_list_lock)
2457 {
2458 read_lock(&proto_list_lock);
2459 return seq_list_start_head(&proto_list, *pos);
2460 }
2461
2462 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2463 {
2464 return seq_list_next(v, &proto_list, pos);
2465 }
2466
2467 static void proto_seq_stop(struct seq_file *seq, void *v)
2468 __releases(proto_list_lock)
2469 {
2470 read_unlock(&proto_list_lock);
2471 }
2472
2473 static char proto_method_implemented(const void *method)
2474 {
2475 return method == NULL ? 'n' : 'y';
2476 }
2477
2478 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2479 {
2480 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2481 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2482 proto->name,
2483 proto->obj_size,
2484 sock_prot_inuse_get(seq_file_net(seq), proto),
2485 proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
2486 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2487 proto->max_header,
2488 proto->slab == NULL ? "no" : "yes",
2489 module_name(proto->owner),
2490 proto_method_implemented(proto->close),
2491 proto_method_implemented(proto->connect),
2492 proto_method_implemented(proto->disconnect),
2493 proto_method_implemented(proto->accept),
2494 proto_method_implemented(proto->ioctl),
2495 proto_method_implemented(proto->init),
2496 proto_method_implemented(proto->destroy),
2497 proto_method_implemented(proto->shutdown),
2498 proto_method_implemented(proto->setsockopt),
2499 proto_method_implemented(proto->getsockopt),
2500 proto_method_implemented(proto->sendmsg),
2501 proto_method_implemented(proto->recvmsg),
2502 proto_method_implemented(proto->sendpage),
2503 proto_method_implemented(proto->bind),
2504 proto_method_implemented(proto->backlog_rcv),
2505 proto_method_implemented(proto->hash),
2506 proto_method_implemented(proto->unhash),
2507 proto_method_implemented(proto->get_port),
2508 proto_method_implemented(proto->enter_memory_pressure));
2509 }
2510
2511 static int proto_seq_show(struct seq_file *seq, void *v)
2512 {
2513 if (v == &proto_list)
2514 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2515 "protocol",
2516 "size",
2517 "sockets",
2518 "memory",
2519 "press",
2520 "maxhdr",
2521 "slab",
2522 "module",
2523 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2524 else
2525 proto_seq_printf(seq, list_entry(v, struct proto, node));
2526 return 0;
2527 }
2528
2529 static const struct seq_operations proto_seq_ops = {
2530 .start = proto_seq_start,
2531 .next = proto_seq_next,
2532 .stop = proto_seq_stop,
2533 .show = proto_seq_show,
2534 };
2535
2536 static int proto_seq_open(struct inode *inode, struct file *file)
2537 {
2538 return seq_open_net(inode, file, &proto_seq_ops,
2539 sizeof(struct seq_net_private));
2540 }
2541
2542 static const struct file_operations proto_seq_fops = {
2543 .owner = THIS_MODULE,
2544 .open = proto_seq_open,
2545 .read = seq_read,
2546 .llseek = seq_lseek,
2547 .release = seq_release_net,
2548 };
2549
2550 static __net_init int proto_init_net(struct net *net)
2551 {
2552 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
2553 return -ENOMEM;
2554
2555 return 0;
2556 }
2557
2558 static __net_exit void proto_exit_net(struct net *net)
2559 {
2560 proc_net_remove(net, "protocols");
2561 }
2562
2563
2564 static __net_initdata struct pernet_operations proto_net_ops = {
2565 .init = proto_init_net,
2566 .exit = proto_exit_net,
2567 };
2568
2569 static int __init proto_init(void)
2570 {
2571 return register_pernet_subsys(&proto_net_ops);
2572 }
2573
2574 subsys_initcall(proto_init);
2575
2576 #endif /* CONFIG_PROC_FS */