/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created and that the
 * current process has it as well.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it in all
 * user namespaces (i.e. in the initial user namespace).
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);
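
/*
 * Illustrative sketch (not taken from this file): a protocol-private
 * setsockopt handler could gate a privileged option on these helpers.
 * "my_proto_setsockopt" and "MY_PRIV_OPT" are hypothetical names used
 * only for this example.
 *
 *	static int my_proto_setsockopt(struct sock *sk, int optname, int val)
 *	{
 *		if (optname == MY_PRIV_OPT &&
 *		    !sk_net_capable(sk, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return 0;
 *	}
 */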


#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings at build time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
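
/*
 * For reference: with UIO_MAXIOV == 1024, the default above works out to
 * sizeof(unsigned long) * (2 * 1024 + 512), i.e. 20480 bytes on 64-bit
 * and 10240 bytes on 32-bit builds.
 */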

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down, but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
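
/*
 * Illustrative sketch (userspace view, not kernel code): the timeout
 * parsed above is supplied to setsockopt() as a struct timeval, e.g.:
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("SO_RCVTIMEO");
 *
 * A negative tv_sec is clamped to an immediate timeout, and 0/0 selects
 * MAX_SCHEDULE_TIMEOUT ("block forever"), matching the code above.
 */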

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
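
/*
 * Illustrative sketch (not taken from this file): a datagram protocol's
 * delivery path typically ends by handing the skb to the socket via
 * sock_queue_rcv_skb() and dropping it on failure; "my_proto_rcv" is a
 * hypothetical name.
 *
 *	static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */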

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
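
/*
 * Illustrative sketch (userspace view, not kernel code): binding a
 * socket to one interface; this path requires CAP_NET_RAW, and passing
 * an empty name (or zero length) unbinds the socket again:
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       "eth0", strlen("eth0")) < 0)
 *		perror("SO_BINDTODEVICE");
 */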

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this - BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this - BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP) {
				if (sk->sk_state != TCP_ESTABLISHED) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
		sk->sk_tsflags = val;
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
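
/*
 * Illustrative sketch (userspace view, not kernel code): the doubling in
 * the SO_RCVBUF/SO_SNDBUF cases above is visible to applications;
 * getsockopt() reports roughly twice the requested value (bounded by
 * sysctl_rmem_max and SOCK_MIN_RCVBUF):
 *
 *	int req = 65536, got;
 *	socklen_t len = sizeof(got);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
 *	printf("asked for %d, kernel uses %d\n", req, got);
 */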


static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
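
/*
 * Illustrative sketch (userspace view, not kernel code): the SO_PEERCRED
 * case above is how a server on an AF_UNIX socket learns who connected:
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("pid=%d uid=%d gid=%d\n",
 *		       peer.pid, peer.uid, peer.gid);
 */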

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
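
/*
 * Illustrative sketch (not taken from this file): an address family's
 * create() hook usually pairs sk_alloc() with sock_init_data();
 * "my_proto" and "my_proto_create" are hypothetical names.
 *
 *	static int my_proto_create(struct net *net, struct socket *sock,
 *				   int protocol, int kern)
 *	{
 *		struct sock *sk;
 *
 *		sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *		if (!sk)
 *			return -ENOBUFS;
 *		sock_init_data(sock, sk);
 *		return 0;
 *	}
 */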

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc so we can tell whether
	 * some packets are still in a tx queue.
	 * If the count is not yet zero, sock_wfree() will call __sk_free(sk) later.
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and after
 * that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
					   af_callback_keys + newsk->sk_family,
					   af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between creation of
			 * original socket and cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after the sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

void skb_orphan_partial(struct sk_buff *skb)
{
	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
	 * so we do not completely orphan the skb, but transfer all
	 * accounted bytes but one, to avoid unexpected reorders.
	 */
	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
	    ) {
		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
		skb->truesize = 1;
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);
#endif

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
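
/*
 * Illustrative sketch (not taken from this file): sock_kmalloc() and
 * sock_kfree_s() must be used as a pair so sk_omem_alloc stays balanced;
 * the caller has to remember the size it asked for. "struct my_opt" is
 * hypothetical.
 *
 *	struct my_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	(use opt; then on teardown:)
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */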

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less than or equal to PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	pfrag->offset = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		pfrag->page = alloc_pages(gfp | __GFP_COMP |
					  __GFP_NOWARN | __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			return true;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
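
/*
 * Illustrative sketch (not taken from this file): a sendmsg()
 * implementation fills the per-socket page_frag in the style of
 * tcp_sendmsg(); "msg_left" is a hypothetical byte count, and error
 * handling plus the actual copy are trimmed.
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *	size_t copied;
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		return -ENOMEM;	(or wait for memory)
 *	copied = min_t(size_t, msg_left, pfrag->size - pfrag->offset);
 *	(copy "copied" bytes to page_address(pfrag->page) + pfrag->offset)
 *	pfrag->offset += copied;
 */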

static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we can not loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
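
/*
 * Illustrative sketch (not taken from this file): the canonical caller
 * pattern, under lock_sock(), in a protocol's recvmsg() implementation:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */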
1944
1945 /**
1946 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
1947 * @sk: socket
1948 * @size: memory size to allocate
1949 * @kind: allocation type
1950 *
1951 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
1952 * rmem allocation. This function assumes that protocols which have
1953 * memory_pressure use sk_wmem_queued as write buffer accounting.
1954 */
1955 int __sk_mem_schedule(struct sock *sk, int size, int kind)
1956 {
1957 struct proto *prot = sk->sk_prot;
1958 int amt = sk_mem_pages(size);
1959 long allocated;
1960 int parent_status = UNDER_LIMIT;
1961
1962 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1963
1964 allocated = sk_memory_allocated_add(sk, amt, &parent_status);
1965
1966 /* Under limit. */
1967 if (parent_status == UNDER_LIMIT &&
1968 allocated <= sk_prot_mem_limits(sk, 0)) {
1969 sk_leave_memory_pressure(sk);
1970 return 1;
1971 }
1972
1973 /* Under pressure. (we or our parents) */
1974 if ((parent_status > SOFT_LIMIT) ||
1975 allocated > sk_prot_mem_limits(sk, 1))
1976 sk_enter_memory_pressure(sk);
1977
1978 /* Over hard limit (we or our parents) */
1979 if ((parent_status == OVER_LIMIT) ||
1980 (allocated > sk_prot_mem_limits(sk, 2)))
1981 goto suppress_allocation;
1982
1983 /* guarantee minimum buffer size under pressure */
1984 if (kind == SK_MEM_RECV) {
1985 if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
1986 return 1;
1987
1988 } else { /* SK_MEM_SEND */
1989 if (sk->sk_type == SOCK_STREAM) {
1990 if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
1991 return 1;
1992 } else if (atomic_read(&sk->sk_wmem_alloc) <
1993 prot->sysctl_wmem[0])
1994 return 1;
1995 }
1996
1997 if (sk_has_memory_pressure(sk)) {
1998 int alloc;
1999
2000 if (!sk_under_memory_pressure(sk))
2001 return 1;
2002 alloc = sk_sockets_allocated_read_positive(sk);
2003 if (sk_prot_mem_limits(sk, 2) > alloc *
2004 sk_mem_pages(sk->sk_wmem_queued +
2005 atomic_read(&sk->sk_rmem_alloc) +
2006 sk->sk_forward_alloc))
2007 return 1;
2008 }
2009
2010 suppress_allocation:
2011
2012 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2013 sk_stream_moderate_sndbuf(sk);
2014
2015 /* Fail only if socket is _under_ its sndbuf.
2016 * In this case we cannot block, so that we have to fail.
2017 */
2018 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2019 return 1;
2020 }
2021
2022 trace_sock_exceed_buf_limit(sk, prot, allocated);
2023
2024 /* Alas. Undo changes. */
2025 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2026
2027 sk_memory_allocated_sub(sk, amt);
2028
2029 return 0;
2030 }
2031 EXPORT_SYMBOL(__sk_mem_schedule);
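
/*
 * Illustrative sketch only: protocols normally reach this through the
 * sk_rmem_schedule()/sk_wmem_schedule() inlines in include/net/sock.h,
 * which consume sk_forward_alloc first and fall back to
 * __sk_mem_schedule() one SK_MEM_QUANTUM at a time. "my_queue_rcv" is
 * hypothetical.
 */
#if 0	/* example only */
static bool my_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return false;		/* over the protocol's memory limits */
	skb_set_owner_r(skb, sk);	/* charges skb->truesize to sk_rmem_alloc */
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	return true;
}
#endif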
2032
2033 /**
2034 * __sk_mem_reclaim - reclaim memory_allocated
2035 * @sk: socket
2036 */
2037 void __sk_mem_reclaim(struct sock *sk)
2038 {
2039 sk_memory_allocated_sub(sk,
2040 sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
2041 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
2042
2043 if (sk_under_memory_pressure(sk) &&
2044 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2045 sk_leave_memory_pressure(sk);
2046 }
2047 EXPORT_SYMBOL(__sk_mem_reclaim);
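
/*
 * Illustrative sketch only: the sk_mem_reclaim*() inlines in
 * include/net/sock.h gate this call, skipping it while sk_forward_alloc
 * holds less than one SK_MEM_QUANTUM. "my_purge" is hypothetical.
 */
#if 0	/* example only */
static void my_purge(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);	/* skb destructors uncharge */
	sk_mem_reclaim(sk);	/* hand whole quanta back to memory_allocated */
}
#endif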
2048
2049
2050 /*
2051 * Set of default routines for initialising struct proto_ops when
2052 * the protocol does not support a particular function. In certain
2053 * cases where it makes no sense for a protocol to have a "do nothing"
2054 * function, some default processing is provided.
2055 */
2056
2057 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2058 {
2059 return -EOPNOTSUPP;
2060 }
2061 EXPORT_SYMBOL(sock_no_bind);
2062
2063 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2064 int len, int flags)
2065 {
2066 return -EOPNOTSUPP;
2067 }
2068 EXPORT_SYMBOL(sock_no_connect);
2069
2070 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2071 {
2072 return -EOPNOTSUPP;
2073 }
2074 EXPORT_SYMBOL(sock_no_socketpair);
2075
2076 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2077 {
2078 return -EOPNOTSUPP;
2079 }
2080 EXPORT_SYMBOL(sock_no_accept);
2081
2082 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2083 int *len, int peer)
2084 {
2085 return -EOPNOTSUPP;
2086 }
2087 EXPORT_SYMBOL(sock_no_getname);
2088
2089 unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2090 {
2091 return 0;
2092 }
2093 EXPORT_SYMBOL(sock_no_poll);
2094
2095 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2096 {
2097 return -EOPNOTSUPP;
2098 }
2099 EXPORT_SYMBOL(sock_no_ioctl);
2100
2101 int sock_no_listen(struct socket *sock, int backlog)
2102 {
2103 return -EOPNOTSUPP;
2104 }
2105 EXPORT_SYMBOL(sock_no_listen);
2106
2107 int sock_no_shutdown(struct socket *sock, int how)
2108 {
2109 return -EOPNOTSUPP;
2110 }
2111 EXPORT_SYMBOL(sock_no_shutdown);
2112
2113 int sock_no_setsockopt(struct socket *sock, int level, int optname,
2114 char __user *optval, unsigned int optlen)
2115 {
2116 return -EOPNOTSUPP;
2117 }
2118 EXPORT_SYMBOL(sock_no_setsockopt);
2119
2120 int sock_no_getsockopt(struct socket *sock, int level, int optname,
2121 char __user *optval, int __user *optlen)
2122 {
2123 return -EOPNOTSUPP;
2124 }
2125 EXPORT_SYMBOL(sock_no_getsockopt);
2126
2127 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2128 size_t len)
2129 {
2130 return -EOPNOTSUPP;
2131 }
2132 EXPORT_SYMBOL(sock_no_sendmsg);
2133
2134 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
2135 size_t len, int flags)
2136 {
2137 return -EOPNOTSUPP;
2138 }
2139 EXPORT_SYMBOL(sock_no_recvmsg);
2140
2141 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2142 {
2143 /* Mirror missing mmap method error code */
2144 return -ENODEV;
2145 }
2146 EXPORT_SYMBOL(sock_no_mmap);
2147
2148 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2149 {
2150 ssize_t res;
2151 struct msghdr msg = {.msg_flags = flags};
2152 struct kvec iov;
2153 char *kaddr = kmap(page);
2154 iov.iov_base = kaddr + offset;
2155 iov.iov_len = size;
2156 res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2157 kunmap(page);
2158 return res;
2159 }
2160 EXPORT_SYMBOL(sock_no_sendpage);
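
/*
 * Illustrative sketch only: a family that supports just a subset of the
 * socket API points the remaining proto_ops slots at the stubs above,
 * much as af_packet does. PF_MYFAMILY and the my_* handlers are
 * hypothetical.
 */
#if 0	/* example only */
static const struct proto_ops my_dgram_ops = {
	.family		= PF_MYFAMILY,
	.owner		= THIS_MODULE,
	.bind		= my_bind,
	.sendmsg	= my_sendmsg,
	.recvmsg	= my_recvmsg,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
#endif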
2161
2162 /*
2163 * Default Socket Callbacks
2164 */
2165
2166 static void sock_def_wakeup(struct sock *sk)
2167 {
2168 struct socket_wq *wq;
2169
2170 rcu_read_lock();
2171 wq = rcu_dereference(sk->sk_wq);
2172 if (wq_has_sleeper(wq))
2173 wake_up_interruptible_all(&wq->wait);
2174 rcu_read_unlock();
2175 }
2176
2177 static void sock_def_error_report(struct sock *sk)
2178 {
2179 struct socket_wq *wq;
2180
2181 rcu_read_lock();
2182 wq = rcu_dereference(sk->sk_wq);
2183 if (wq_has_sleeper(wq))
2184 wake_up_interruptible_poll(&wq->wait, POLLERR);
2185 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2186 rcu_read_unlock();
2187 }
2188
2189 static void sock_def_readable(struct sock *sk)
2190 {
2191 struct socket_wq *wq;
2192
2193 rcu_read_lock();
2194 wq = rcu_dereference(sk->sk_wq);
2195 if (wq_has_sleeper(wq))
2196 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2197 POLLRDNORM | POLLRDBAND);
2198 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2199 rcu_read_unlock();
2200 }
2201
2202 static void sock_def_write_space(struct sock *sk)
2203 {
2204 struct socket_wq *wq;
2205
2206 rcu_read_lock();
2207
2208 /* Do not wake up a writer until he can make "significant"
2209 * progress. --DaveM
2210 */
2211 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2212 wq = rcu_dereference(sk->sk_wq);
2213 if (wq_has_sleeper(wq))
2214 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2215 POLLWRNORM | POLLWRBAND);
2216
2217 /* Should agree with poll, otherwise some programs break */
2218 if (sock_writeable(sk))
2219 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2220 }
2221
2222 rcu_read_unlock();
2223 }
2224
2225 static void sock_def_destruct(struct sock *sk)
2226 {
2227 kfree(sk->sk_protinfo);
2228 }
2229
2230 void sk_send_sigurg(struct sock *sk)
2231 {
2232 if (sk->sk_socket && sk->sk_socket->file)
2233 if (send_sigurg(&sk->sk_socket->file->f_owner))
2234 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2235 }
2236 EXPORT_SYMBOL(sk_send_sigurg);
2237
2238 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2239 unsigned long expires)
2240 {
2241 if (!mod_timer(timer, expires))
2242 sock_hold(sk);
2243 }
2244 EXPORT_SYMBOL(sk_reset_timer);
2245
2246 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2247 {
2248 if (del_timer(timer))
2249 __sock_put(sk);
2250 }
2251 EXPORT_SYMBOL(sk_stop_timer);
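
/*
 * Illustrative sketch only: the sock_hold()/__sock_put() pairing above
 * keeps the socket alive while a protocol timer is pending, so the
 * handler may safely dereference it. "my_timer_handler" is hypothetical;
 * compare tcp_write_timer().
 */
#if 0	/* example only */
static void my_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	/* ... do the timer work, possibly sk_reset_timer() to re-arm ... */
	bh_unlock_sock(sk);
	sock_put(sk);	/* drop the reference sk_reset_timer() took */
}
#endif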
2252
2253 void sock_init_data(struct socket *sock, struct sock *sk)
2254 {
2255 skb_queue_head_init(&sk->sk_receive_queue);
2256 skb_queue_head_init(&sk->sk_write_queue);
2257 skb_queue_head_init(&sk->sk_error_queue);
2258
2259 sk->sk_send_head = NULL;
2260
2261 init_timer(&sk->sk_timer);
2262
2263 sk->sk_allocation = GFP_KERNEL;
2264 sk->sk_rcvbuf = sysctl_rmem_default;
2265 sk->sk_sndbuf = sysctl_wmem_default;
2266 sk->sk_state = TCP_CLOSE;
2267 sk_set_socket(sk, sock);
2268
2269 sock_set_flag(sk, SOCK_ZAPPED);
2270
2271 if (sock) {
2272 sk->sk_type = sock->type;
2273 sk->sk_wq = sock->wq;
2274 sock->sk = sk;
2275 } else
2276 sk->sk_wq = NULL;
2277
2278 spin_lock_init(&sk->sk_dst_lock);
2279 rwlock_init(&sk->sk_callback_lock);
2280 lockdep_set_class_and_name(&sk->sk_callback_lock,
2281 af_callback_keys + sk->sk_family,
2282 af_family_clock_key_strings[sk->sk_family]);
2283
2284 sk->sk_state_change = sock_def_wakeup;
2285 sk->sk_data_ready = sock_def_readable;
2286 sk->sk_write_space = sock_def_write_space;
2287 sk->sk_error_report = sock_def_error_report;
2288 sk->sk_destruct = sock_def_destruct;
2289
2290 sk->sk_frag.page = NULL;
2291 sk->sk_frag.offset = 0;
2292 sk->sk_peek_off = -1;
2293
2294 sk->sk_peer_pid = NULL;
2295 sk->sk_peer_cred = NULL;
2296 sk->sk_write_pending = 0;
2297 sk->sk_rcvlowat = 1;
2298 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
2299 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
2300
2301 sk->sk_stamp = ktime_set(-1L, 0);
2302
2303 #ifdef CONFIG_NET_RX_BUSY_POLL
2304 sk->sk_napi_id = 0;
2305 sk->sk_ll_usec = sysctl_net_busy_read;
2306 #endif
2307
2308 sk->sk_max_pacing_rate = ~0U;
2309 sk->sk_pacing_rate = ~0U;
2310 /*
2311 * Before updating sk_refcnt, we must commit prior changes to memory
2312 * (see Documentation/RCU/rculist_nulls.txt for details)
2313 */
2314 smp_wmb();
2315 atomic_set(&sk->sk_refcnt, 1);
2316 atomic_set(&sk->sk_drops, 0);
2317 }
2318 EXPORT_SYMBOL(sock_init_data);
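
/*
 * Illustrative sketch only: a family's create() hook typically pairs
 * sk_alloc() with sock_init_data(), as the various af_*.c files do.
 * PF_MYFAMILY, my_proto and my_dgram_ops are hypothetical.
 */
#if 0	/* example only */
static int my_create(struct net *net, struct socket *sock, int protocol,
		     int kern)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_MYFAMILY, GFP_KERNEL, &my_proto);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &my_dgram_ops;
	sock_init_data(sock, sk);	/* queues, timers, default callbacks */
	return 0;
}
#endif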
2319
2320 void lock_sock_nested(struct sock *sk, int subclass)
2321 {
2322 might_sleep();
2323 spin_lock_bh(&sk->sk_lock.slock);
2324 if (sk->sk_lock.owned)
2325 __lock_sock(sk);
2326 sk->sk_lock.owned = 1;
2327 spin_unlock(&sk->sk_lock.slock);
2328 /*
2329 * The sk_lock has mutex_lock() semantics here:
2330 */
2331 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2332 local_bh_enable();
2333 }
2334 EXPORT_SYMBOL(lock_sock_nested);
2335
2336 void release_sock(struct sock *sk)
2337 {
2338 /*
2339 * The sk_lock has mutex_unlock() semantics:
2340 */
2341 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
2342
2343 spin_lock_bh(&sk->sk_lock.slock);
2344 if (sk->sk_backlog.tail)
2345 __release_sock(sk);
2346
2347 /* Warning: release_cb() might need to release sk ownership,
2348 * i.e. call sock_release_ownership(sk) before we do.
2349 */
2350 if (sk->sk_prot->release_cb)
2351 sk->sk_prot->release_cb(sk);
2352
2353 sock_release_ownership(sk);
2354 if (waitqueue_active(&sk->sk_lock.wq))
2355 wake_up(&sk->sk_lock.wq);
2356 spin_unlock_bh(&sk->sk_lock.slock);
2357 }
2358 EXPORT_SYMBOL(release_sock);
2359
2360 /**
2361 * lock_sock_fast - fast version of lock_sock
2362 * @sk: socket
2363 *
2364 * This version should be used for very small sections, where the process won't block.
2365 * Returns false if the fast path is taken:
2366 * sk_lock.slock locked, owned = 0, BH disabled
2367 * Returns true if the slow path is taken:
2368 * sk_lock.slock unlocked, owned = 1, BH enabled
2369 */
2370 bool lock_sock_fast(struct sock *sk)
2371 {
2372 might_sleep();
2373 spin_lock_bh(&sk->sk_lock.slock);
2374
2375 if (!sk->sk_lock.owned)
2376 /*
2377 * Note: BH stays disabled on this fast path
2378 */
2379 return false;
2380
2381 __lock_sock(sk);
2382 sk->sk_lock.owned = 1;
2383 spin_unlock(&sk->sk_lock.slock);
2384 /*
2385 * The sk_lock has mutex_lock() semantics here:
2386 */
2387 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2388 local_bh_enable();
2389 return true;
2390 }
2391 EXPORT_SYMBOL(lock_sock_fast);
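
/*
 * Illustrative sketch only: the boolean result must be handed back to
 * unlock_sock_fast() so the matching unlock path (spin_unlock_bh() vs
 * release_sock()) is taken. "my_peek_len" is hypothetical.
 */
#if 0	/* example only */
static int my_peek_len(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);
	int len = skb_queue_len(&sk->sk_receive_queue);

	unlock_sock_fast(sk, slow);
	return len;
}
#endif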
2392
2393 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2394 {
2395 struct timeval tv;
2396 if (!sock_flag(sk, SOCK_TIMESTAMP))
2397 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2398 tv = ktime_to_timeval(sk->sk_stamp);
2399 if (tv.tv_sec == -1)
2400 return -ENOENT;
2401 if (tv.tv_sec == 0) {
2402 sk->sk_stamp = ktime_get_real();
2403 tv = ktime_to_timeval(sk->sk_stamp);
2404 }
2405 return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2406 }
2407 EXPORT_SYMBOL(sock_get_timestamp);
2408
2409 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2410 {
2411 struct timespec ts;
2412 if (!sock_flag(sk, SOCK_TIMESTAMP))
2413 sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2414 ts = ktime_to_timespec(sk->sk_stamp);
2415 if (ts.tv_sec == -1)
2416 return -ENOENT;
2417 if (ts.tv_sec == 0) {
2418 sk->sk_stamp = ktime_get_real();
2419 ts = ktime_to_timespec(sk->sk_stamp);
2420 }
2421 return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2422 }
2423 EXPORT_SYMBOL(sock_get_timestampns);
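
/*
 * Illustrative note: the two helpers above back the SIOCGSTAMP and
 * SIOCGSTAMPNS ioctls, which userspace uses to fetch the receive time
 * of the last packet delivered to the socket, e.g.:
 *
 *	struct timeval tv;
 *	ioctl(fd, SIOCGSTAMP, &tv);
 */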
2424
2425 void sock_enable_timestamp(struct sock *sk, int flag)
2426 {
2427 if (!sock_flag(sk, flag)) {
2428 unsigned long previous_flags = sk->sk_flags;
2429
2430 sock_set_flag(sk, flag);
2431 /*
2432 * We just set one of the two flags that require net
2433 * time stamping, but time stamping might already have been
2434 * on because of the other one.
2435 */
2436 if (!(previous_flags & SK_FLAGS_TIMESTAMP))
2437 net_enable_timestamp();
2438 }
2439 }
2440
2441 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2442 int level, int type)
2443 {
2444 struct sock_exterr_skb *serr;
2445 struct sk_buff *skb;
2446 int copied, err;
2447
2448 err = -EAGAIN;
2449 skb = sock_dequeue_err_skb(sk);
2450 if (skb == NULL)
2451 goto out;
2452
2453 copied = skb->len;
2454 if (copied > len) {
2455 msg->msg_flags |= MSG_TRUNC;
2456 copied = len;
2457 }
2458 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2459 if (err)
2460 goto out_free_skb;
2461
2462 sock_recv_timestamp(msg, sk, skb);
2463
2464 serr = SKB_EXT_ERR(skb);
2465 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2466
2467 msg->msg_flags |= MSG_ERRQUEUE;
2468 err = copied;
2469
2470 out_free_skb:
2471 kfree_skb(skb);
2472 out:
2473 return err;
2474 }
2475 EXPORT_SYMBOL(sock_recv_errqueue);
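
/*
 * Illustrative sketch only: protocols using the common error queue
 * dispatch to this helper early in their recvmsg path, as af_packet
 * does for TX timestamps (fragment, not a complete function):
 */
#if 0	/* example only */
	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, len,
					  SOL_PACKET, PACKET_TX_TIMESTAMP);
#endif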
2476
2477 /*
2478 * Get a socket option on a socket.
2479 *
2480 * FIX: POSIX 1003.1g is very ambiguous here. It states that
2481 * asynchronous errors should be reported by getsockopt. We assume
2482 * this means if you specify SO_ERROR (otherwise what's the point of it?).
2483 */
2484 int sock_common_getsockopt(struct socket *sock, int level, int optname,
2485 char __user *optval, int __user *optlen)
2486 {
2487 struct sock *sk = sock->sk;
2488
2489 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2490 }
2491 EXPORT_SYMBOL(sock_common_getsockopt);
2492
2493 #ifdef CONFIG_COMPAT
2494 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2495 char __user *optval, int __user *optlen)
2496 {
2497 struct sock *sk = sock->sk;
2498
2499 if (sk->sk_prot->compat_getsockopt != NULL)
2500 return sk->sk_prot->compat_getsockopt(sk, level, optname,
2501 optval, optlen);
2502 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2503 }
2504 EXPORT_SYMBOL(compat_sock_common_getsockopt);
2505 #endif
2506
2507 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
2508 struct msghdr *msg, size_t size, int flags)
2509 {
2510 struct sock *sk = sock->sk;
2511 int addr_len = 0;
2512 int err;
2513
2514 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
2515 flags & ~MSG_DONTWAIT, &addr_len);
2516 if (err >= 0)
2517 msg->msg_namelen = addr_len;
2518 return err;
2519 }
2520 EXPORT_SYMBOL(sock_common_recvmsg);
2521
2522 /*
2523 * Set socket options on an inet socket.
2524 */
2525 int sock_common_setsockopt(struct socket *sock, int level, int optname,
2526 char __user *optval, unsigned int optlen)
2527 {
2528 struct sock *sk = sock->sk;
2529
2530 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2531 }
2532 EXPORT_SYMBOL(sock_common_setsockopt);
2533
2534 #ifdef CONFIG_COMPAT
2535 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2536 char __user *optval, unsigned int optlen)
2537 {
2538 struct sock *sk = sock->sk;
2539
2540 if (sk->sk_prot->compat_setsockopt != NULL)
2541 return sk->sk_prot->compat_setsockopt(sk, level, optname,
2542 optval, optlen);
2543 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2544 }
2545 EXPORT_SYMBOL(compat_sock_common_setsockopt);
2546 #endif
2547
2548 void sk_common_release(struct sock *sk)
2549 {
2550 if (sk->sk_prot->destroy)
2551 sk->sk_prot->destroy(sk);
2552
2553 /*
2554 * Observation: when sk_common_release() is called, processes have
2555 * no access to the socket, but the network stack still does.
2556 * Step one, detach it from networking:
2557 *
2558 * A. Remove it from the hash tables.
2559 */
2560
2561 sk->sk_prot->unhash(sk);
2562
2563 /*
2564 * At this point the socket cannot receive new packets, but it is possible
2565 * that some packets are in flight because some CPU ran the receiver and
2566 * did the hash table lookup before we unhashed the socket. They will reach
2567 * the receive queue and be purged by the socket destructor.
2568 *
2569 * Also, we may still have packets pending on the receive queue and, probably,
2570 * our own packets waiting in device queues. sock_destroy will drain the
2571 * receive queue, but transmitted packets will delay socket destruction
2572 * until the last reference is released.
2573 */
2574
2575 sock_orphan(sk);
2576
2577 xfrm_sk_free_policy(sk);
2578
2579 sk_refcnt_debug_release(sk);
2580
2581 if (sk->sk_frag.page) {
2582 put_page(sk->sk_frag.page);
2583 sk->sk_frag.page = NULL;
2584 }
2585
2586 sock_put(sk);
2587 }
2588 EXPORT_SYMBOL(sk_common_release);
2589
2590 #ifdef CONFIG_PROC_FS
2591 #define PROTO_INUSE_NR 64 /* should be enough for now */
2592 struct prot_inuse {
2593 int val[PROTO_INUSE_NR];
2594 };
2595
2596 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2597
2598 #ifdef CONFIG_NET_NS
2599 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2600 {
2601 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2602 }
2603 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2604
2605 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2606 {
2607 int cpu, idx = prot->inuse_idx;
2608 int res = 0;
2609
2610 for_each_possible_cpu(cpu)
2611 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2612
2613 return res >= 0 ? res : 0;
2614 }
2615 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2616
2617 static int __net_init sock_inuse_init_net(struct net *net)
2618 {
2619 net->core.inuse = alloc_percpu(struct prot_inuse);
2620 return net->core.inuse ? 0 : -ENOMEM;
2621 }
2622
2623 static void __net_exit sock_inuse_exit_net(struct net *net)
2624 {
2625 free_percpu(net->core.inuse);
2626 }
2627
2628 static struct pernet_operations net_inuse_ops = {
2629 .init = sock_inuse_init_net,
2630 .exit = sock_inuse_exit_net,
2631 };
2632
2633 static __init int net_inuse_init(void)
2634 {
2635 if (register_pernet_subsys(&net_inuse_ops))
2636 panic("Cannot initialize net inuse counters");
2637
2638 return 0;
2639 }
2640
2641 core_initcall(net_inuse_init);
2642 #else
2643 static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2644
2645 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2646 {
2647 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2648 }
2649 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2650
2651 int sock_prot_inuse_get(struct net *net, struct proto *prot)
2652 {
2653 int cpu, idx = prot->inuse_idx;
2654 int res = 0;
2655
2656 for_each_possible_cpu(cpu)
2657 res += per_cpu(prot_inuse, cpu).val[idx];
2658
2659 return res >= 0 ? res : 0;
2660 }
2661 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2662 #endif
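
/*
 * Illustrative note: protocols bump the per-cpu counter when a socket
 * is hashed and drop it on unhash, as the inet hash code does:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	(on hash)
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	(on unhash)
 */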
2663
2664 static void assign_proto_idx(struct proto *prot)
2665 {
2666 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2667
2668 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2669 pr_err("PROTO_INUSE_NR exhausted\n");
2670 return;
2671 }
2672
2673 set_bit(prot->inuse_idx, proto_inuse_idx);
2674 }
2675
2676 static void release_proto_idx(struct proto *prot)
2677 {
2678 if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2679 clear_bit(prot->inuse_idx, proto_inuse_idx);
2680 }
2681 #else
2682 static inline void assign_proto_idx(struct proto *prot)
2683 {
2684 }
2685
2686 static inline void release_proto_idx(struct proto *prot)
2687 {
2688 }
2689 #endif
2690
2691 int proto_register(struct proto *prot, int alloc_slab)
2692 {
2693 if (alloc_slab) {
2694 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2695 SLAB_HWCACHE_ALIGN | prot->slab_flags,
2696 NULL);
2697
2698 if (prot->slab == NULL) {
2699 pr_crit("%s: Can't create sock SLAB cache!\n",
2700 prot->name);
2701 goto out;
2702 }
2703
2704 if (prot->rsk_prot != NULL) {
2705 prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
2706 if (prot->rsk_prot->slab_name == NULL)
2707 goto out_free_sock_slab;
2708
2709 prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
2710 prot->rsk_prot->obj_size, 0,
2711 SLAB_HWCACHE_ALIGN, NULL);
2712
2713 if (prot->rsk_prot->slab == NULL) {
2714 pr_crit("%s: Can't create request sock SLAB cache!\n",
2715 prot->name);
2716 goto out_free_request_sock_slab_name;
2717 }
2718 }
2719
2720 if (prot->twsk_prot != NULL) {
2721 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2722
2723 if (prot->twsk_prot->twsk_slab_name == NULL)
2724 goto out_free_request_sock_slab;
2725
2726 prot->twsk_prot->twsk_slab =
2727 kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2728 prot->twsk_prot->twsk_obj_size,
2729 0,
2730 SLAB_HWCACHE_ALIGN |
2731 prot->slab_flags,
2732 NULL);
2733 if (prot->twsk_prot->twsk_slab == NULL)
2734 goto out_free_timewait_sock_slab_name;
2735 }
2736 }
2737
2738 mutex_lock(&proto_list_mutex);
2739 list_add(&prot->node, &proto_list);
2740 assign_proto_idx(prot);
2741 mutex_unlock(&proto_list_mutex);
2742 return 0;
2743
2744 out_free_timewait_sock_slab_name:
2745 kfree(prot->twsk_prot->twsk_slab_name);
2746 out_free_request_sock_slab:
2747 if (prot->rsk_prot && prot->rsk_prot->slab) {
2748 kmem_cache_destroy(prot->rsk_prot->slab);
2749 prot->rsk_prot->slab = NULL;
2750 }
2751 out_free_request_sock_slab_name:
2752 if (prot->rsk_prot)
2753 kfree(prot->rsk_prot->slab_name);
2754 out_free_sock_slab:
2755 kmem_cache_destroy(prot->slab);
2756 prot->slab = NULL;
2757 out:
2758 return -ENOBUFS;
2759 }
2760 EXPORT_SYMBOL(proto_register);
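
/*
 * Illustrative sketch only: a minimal registration, assuming a
 * hypothetical "struct my_sock" whose first member embeds struct sock
 * so that obj_size covers both:
 */
#if 0	/* example only */
static struct proto my_proto = {
	.name	  = "MYPROTO",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct my_sock),
};

static int __init my_init(void)
{
	return proto_register(&my_proto, 1);	/* 1: create a slab cache */
}

static void __exit my_exit(void)
{
	proto_unregister(&my_proto);
}
#endif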
2761
2762 void proto_unregister(struct proto *prot)
2763 {
2764 mutex_lock(&proto_list_mutex);
2765 release_proto_idx(prot);
2766 list_del(&prot->node);
2767 mutex_unlock(&proto_list_mutex);
2768
2769 if (prot->slab != NULL) {
2770 kmem_cache_destroy(prot->slab);
2771 prot->slab = NULL;
2772 }
2773
2774 if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
2775 kmem_cache_destroy(prot->rsk_prot->slab);
2776 kfree(prot->rsk_prot->slab_name);
2777 prot->rsk_prot->slab = NULL;
2778 }
2779
2780 if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2781 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2782 kfree(prot->twsk_prot->twsk_slab_name);
2783 prot->twsk_prot->twsk_slab = NULL;
2784 }
2785 }
2786 EXPORT_SYMBOL(proto_unregister);
2787
2788 #ifdef CONFIG_PROC_FS
2789 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2790 __acquires(proto_list_mutex)
2791 {
2792 mutex_lock(&proto_list_mutex);
2793 return seq_list_start_head(&proto_list, *pos);
2794 }
2795
2796 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2797 {
2798 return seq_list_next(v, &proto_list, pos);
2799 }
2800
2801 static void proto_seq_stop(struct seq_file *seq, void *v)
2802 __releases(proto_list_mutex)
2803 {
2804 mutex_unlock(&proto_list_mutex);
2805 }
2806
2807 static char proto_method_implemented(const void *method)
2808 {
2809 return method == NULL ? 'n' : 'y';
2810 }
2811 static long sock_prot_memory_allocated(struct proto *proto)
2812 {
2813 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2814 }
2815
2816 static char *sock_prot_memory_pressure(struct proto *proto)
2817 {
2818 return proto->memory_pressure != NULL ?
2819 proto_memory_pressure(proto) ? "yes" : "no" : "NI";
2820 }
2821
2822 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2823 {
2824
2825 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2826 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2827 proto->name,
2828 proto->obj_size,
2829 sock_prot_inuse_get(seq_file_net(seq), proto),
2830 sock_prot_memory_allocated(proto),
2831 sock_prot_memory_pressure(proto),
2832 proto->max_header,
2833 proto->slab == NULL ? "no" : "yes",
2834 module_name(proto->owner),
2835 proto_method_implemented(proto->close),
2836 proto_method_implemented(proto->connect),
2837 proto_method_implemented(proto->disconnect),
2838 proto_method_implemented(proto->accept),
2839 proto_method_implemented(proto->ioctl),
2840 proto_method_implemented(proto->init),
2841 proto_method_implemented(proto->destroy),
2842 proto_method_implemented(proto->shutdown),
2843 proto_method_implemented(proto->setsockopt),
2844 proto_method_implemented(proto->getsockopt),
2845 proto_method_implemented(proto->sendmsg),
2846 proto_method_implemented(proto->recvmsg),
2847 proto_method_implemented(proto->sendpage),
2848 proto_method_implemented(proto->bind),
2849 proto_method_implemented(proto->backlog_rcv),
2850 proto_method_implemented(proto->hash),
2851 proto_method_implemented(proto->unhash),
2852 proto_method_implemented(proto->get_port),
2853 proto_method_implemented(proto->enter_memory_pressure));
2854 }
2855
2856 static int proto_seq_show(struct seq_file *seq, void *v)
2857 {
2858 if (v == &proto_list)
2859 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
2860 "protocol",
2861 "size",
2862 "sockets",
2863 "memory",
2864 "press",
2865 "maxhdr",
2866 "slab",
2867 "module",
2868 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
2869 else
2870 proto_seq_printf(seq, list_entry(v, struct proto, node));
2871 return 0;
2872 }
2873
2874 static const struct seq_operations proto_seq_ops = {
2875 .start = proto_seq_start,
2876 .next = proto_seq_next,
2877 .stop = proto_seq_stop,
2878 .show = proto_seq_show,
2879 };
2880
2881 static int proto_seq_open(struct inode *inode, struct file *file)
2882 {
2883 return seq_open_net(inode, file, &proto_seq_ops,
2884 sizeof(struct seq_net_private));
2885 }
2886
2887 static const struct file_operations proto_seq_fops = {
2888 .owner = THIS_MODULE,
2889 .open = proto_seq_open,
2890 .read = seq_read,
2891 .llseek = seq_lseek,
2892 .release = seq_release_net,
2893 };
2894
2895 static __net_init int proto_init_net(struct net *net)
2896 {
2897 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
2898 return -ENOMEM;
2899
2900 return 0;
2901 }
2902
2903 static __net_exit void proto_exit_net(struct net *net)
2904 {
2905 remove_proc_entry("protocols", net->proc_net);
2906 }
2907
2908
2909 static __net_initdata struct pernet_operations proto_net_ops = {
2910 .init = proto_init_net,
2911 .exit = proto_exit_net,
2912 };
2913
2914 static int __init proto_init(void)
2915 {
2916 return register_pernet_subsys(&proto_net_ops);
2917 }
2918
2919 subsys_initcall(proto_init);
2920
2921 #endif /* PROC_FS */