/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					SCM_RIGHTS.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko EiBfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect
 *					algorithm.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible, block in connect(2)
 *					once the max backlog of the listening
 *					socket has been reached. This won't
 *					break old apps and it avoids hashing
 *					a huge number of socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are
 *					introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair.
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT;
 *					the core infrastructure does that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from the reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0; it gives the blksize as high water
 *		mark and a fake inode identifier (nor the BSD "first socket
 *		fstat twice" bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns
 *		16 and a null first byte in the path (but not for
 *		gethost/peername - BSD bug??).
 *	socketpair(...SOCK_RAW...) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(Need to check this against the POSIX spec in detail.)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements:
 *		- client shutdown killed the server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions:
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with the pathname name space.
 */
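/*
 * Usage sketch (userspace, illustrative; the path and the abstract name
 * are made-up examples, error handling omitted): the two binding styles
 * described above.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd1 = socket(AF_UNIX, SOCK_STREAM, 0);
 *	int fd2 = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	// Filesystem binding: NUL-terminated pathname, visible in the FS.
 *	strcpy(a.sun_path, "/tmp/example.sock");
 *	bind(fd1, (struct sockaddr *)&a, sizeof(a));
 *
 *	// Abstract binding: sun_path starts with a 0 byte; the name is the
 *	// following bytes, delimited exactly by the address length.
 *	memcpy(a.sun_path, "\0example", 8);
 *	bind(fd2, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 */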
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}
#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */
/*
 * SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */
static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)csum_fold(n);

	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}
#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}
/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it starts with a non-zero byte, it should be NUL
 *		  terminated (FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist. However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}
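/*
 * Usage sketch (userspace, illustrative): how callers normally size the
 * address that unix_mkname() checks. The path is a made-up example.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	// sizeof(a.sun_path) is 108; the pathname plus its NUL must fit,
 *	// or be delimited by the address length handed to bind()/connect().
 *	strncpy(a.sun_path, "/run/example.sock", sizeof(a.sun_path) - 1);
 *	socklen_t len = offsetof(struct sockaddr_un, sun_path)
 *			+ strlen(a.sun_path) + 1;
 *	bind(fd, (struct sockaddr *)&a, len);
 */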
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}
static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}
static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based solely on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is
		 * disconnected, we signal error. Messages are lost.
		 * Do not do this when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}
static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}
static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path	       = u->path;
	u->path.dentry = NULL;
	u->path.mnt    = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair);	/* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}
static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);
static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->readlock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);

	return 0;
}
static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};
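/*
 * Usage sketch (userspace, illustrative) for the set_peek_off hook wired
 * into the three ops tables above: with SO_PEEK_OFF enabled, successive
 * MSG_PEEK reads advance through queued data instead of re-reading it.
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// peeks from offset 0
 *	recv(fd, buf, sizeof(buf), MSG_PEEK);	// continues past the bytes
 *						// peeked above
 */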
static struct proto unix_proto = {
	.name	  = "UNIX",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct unix_sock),
};
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
	/*
	 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
	 * nothing uses it.
	 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}
static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		return err;

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path + 1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum + 1) & 0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}
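/*
 * Usage sketch (userspace, illustrative): autobind is triggered by bind()
 * with only the family, i.e. an address length of sizeof(sa_family_t); the
 * kernel picks a unique 5-hex-digit abstract name, visible via getsockname().
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(a);
 *	bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));
 *	getsockname(fd, (struct sockaddr *)&a, &len);
 *	// a.sun_path[0] == '\0', followed by five hex digits (name varies)
 */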
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}
static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory, calculate the hash for the last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	err = mutex_lock_interruptible(&u->readlock);
	if (err)
		goto out;

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		addr->hash = UNIX_HASH_SIZE;
		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE - 1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we were to do it after the state is locked,
	   we would have to recheck everything again anyway.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/*  Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. That is dangerous because deadlock is
	   possible. The connect-to-self case and a simultaneous
	   attempt to connect are eliminated by checking the socket
	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
	   check this before attempting to grab the lock.

	   And we have to recheck the state after locking the socket.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to the listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state  = SS_CONNECTED;
		sockb->state  = SS_CONNECTED;
	}
	return 0;
}
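/*
 * Usage sketch (userspace, illustrative): socketpair() yields two connected
 * sockets, and because of the init_peercred() calls above either end can
 * query the creating process with SO_PEERCRED (_GNU_SOURCE is needed for
 * struct ucred).
 *
 *	int sv[2];
 *	struct ucred uc;
 *	socklen_t len = sizeof(uc);
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	getsockopt(sv[0], SOL_SOCKET, SO_PEERCRED, &uc, &len);
 *	// uc.pid, uc.uid, uc.gid describe the process that created the pair
 */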
static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}
#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}
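/*
 * Usage sketch (userspace, illustrative): the sender side of fd passing
 * that ends up in unix_attach_fds(). Error handling omitted; fd_to_send
 * is any open descriptor.
 *
 *	char data = 'x';
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(int))]; } u;
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = u.buf,
 *			      .msg_controllen = sizeof(u.buf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_send, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */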
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}
/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (test_bit(SOCK_PASSCRED, &sock->flags) ||
	    !other->sk_socket ||
	    test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
		UNIXCB(skb).pid = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}
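/*
 * Usage sketch (userspace, illustrative): a receiver that asserts
 * SOCK_PASSCRED via SO_PASSCRED and fishes the SCM_CREDENTIALS message
 * added by maybe_add_creds() out of the ancillary data (_GNU_SOURCE is
 * needed for struct ucred/SCM_CREDENTIALS).
 *
 *	int on = 1;
 *	struct ucred uc;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// ... after recvmsg(fd, &msg, 0):
 *	for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm;
 *	     cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_SOCKET &&
 *		    cm->cmsg_type == SCM_CREDENTIALS)
 *			memcpy(&uc, CMSG_DATA(cm), sizeof(uc));
 */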
/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			      struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *other = NULL;
	int namelen = 0; /* fake GCC */
	int err;
	unsigned int hash;
	struct sk_buff *skb;
	long timeo;
	struct scm_cookie tmp_scm;
	int max_level;
	int data_len = 0;
	struct iov_iter from;

	iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, len);

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
		if (err < 0)
			goto out;
		namelen = err;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
	    && (err = unix_autobind(sock)) != 0)
		goto out;

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(siocb->scm, skb, true);
	if (err < 0)
		goto out_free;
	max_level = err + 1;
	unix_get_secdata(siocb->scm, skb);

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &from, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
					hash, &err);
		if (other == NULL)
			goto out_free;
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	unix_state_lock(other);
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (sock_flag(other, SOCK_DEAD)) {
		/*
		 *	Check with 1003.1g - what should a
		 *	datagram error be?
		 */
		unix_state_unlock(other);
		sock_put(other);

		err = 0;
		unix_state_lock(sk);
		if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	if (unix_peer(other) != sk && unix_recvq_full(other)) {
		if (!timeo) {
			err = -EAGAIN;
			goto out_unlock;
		}

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out_free;

		goto restart;
	}

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	skb_queue_tail(&other->sk_receive_queue, skb);
	if (max_level > unix_sk(other)->recursion_level)
		unix_sk(other)->recursion_level = max_level;
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(siocb->scm);
	return len;

out_unlock:
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(siocb->scm);
	return err;
}
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie tmp_scm;
	bool fds_sent = false;
	int max_level;
	int data_len;
	struct iov_iter from;

	iov_iter_init(&from, WRITE, msg->msg_iov, msg->msg_iovlen, len);

	if (NULL == siocb->scm)
		siocb->scm = &tmp_scm;
	wait_for_unix_gc();
	err = scm_send(sock, msg, siocb->scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB)
		goto out_err;

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		max_level = err + 1;
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &from, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		skb_queue_tail(&other->sk_receive_queue, skb);
		if (max_level > unix_sk(other)->recursion_level)
			unix_sk(other)->recursion_level = max_level;
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

	scm_destroy(siocb->scm);
	siocb->scm = NULL;

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(siocb->scm);
	siocb->scm = NULL;
	return sent ? : err;
}
static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
				  struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(kiocb, sock, msg, len);
}
static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
				  struct msghdr *msg, size_t size,
				  int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	if (u->addr) {
		msg->msg_namelen = u->addr->len;
		memcpy(msg->msg_name, u->addr->name, u->addr->len);
	}
}
static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
			      struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int err;
	int peeked, skip;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN;
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
	if (!skb) {
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out_unlock;
	}

	wake_up_interruptible_sync_poll(&u->peer_wait,
					POLLOUT | POLLWRNORM | POLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}
	scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(siocb->scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(siocb->scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
		   - return fds, and not return them on read (old strategy,
		     apparently wrong)
		   - clone fds (I chose it for now, it is the most universal
		     solution)

		   POSIX 1003.1g does not actually define this clearly
		   at all. POSIX 1003.1g doesn't define a lot of things
		   clearly however!
		*/

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, siocb->scm, flags);

out_free:
	skb_free_datagram(sk, skb);
out_unlock:
	mutex_unlock(&u->readlock);
out:
	return err;
}
/*
 *	Sleep until more data has arrived. But check for races..
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (skb_peek_tail(&sk->sk_receive_queue) != last ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	int copied = 0;
	int noblock = flags & MSG_DONTWAIT;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	/* Lock the socket to prevent queue disordering
	 * while it sleeps in memcpy_tomsg
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (unlikely(err)) {
		/* recvmsg() in non blocking mode is supposed to return -EAGAIN;
		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
		 */
		err = noblock ? -EAGAIN : -ERESTARTSYS;
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		last = skb = skb_peek(&sk->sk_receive_queue);
again:
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last);

			if (signal_pending(current)
			    ||  mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
unlock:
			unix_state_unlock(sk);
			break;
		}

		skip = sk_peek_offset(sk, flags);
		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
					  msg, chunk)) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see the note in
			 * unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode & RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode & SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
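/*
 * Usage sketch (userspace, illustrative): the two helpers above back the
 * SIOCINQ/SIOCOUTQ ioctls handled below.
 *
 *	int inq = 0, outq = 0;
 *	ioctl(fd, SIOCINQ, &inq);	// unread bytes (the next datagram's
 *					// size for SOCK_DGRAM)
 *	ioctl(fd, SIOCOUTQ, &outq);	// bytes we queued that the peer has
 *					// not yet consumed
 */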
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
*unix_from_bucket(struct seq_file
*seq
, loff_t
*pos
)
2283 unsigned long offset
= get_offset(*pos
);
2284 unsigned long bucket
= get_bucket(*pos
);
2286 unsigned long count
= 0;
2288 for (sk
= sk_head(&unix_socket_table
[bucket
]); sk
; sk
= sk_next(sk
)) {
2289 if (sock_net(sk
) != seq_file_net(seq
))
2291 if (++count
== offset
)
2298 static struct sock
*unix_next_socket(struct seq_file
*seq
,
2302 unsigned long bucket
;
2304 while (sk
> (struct sock
*)SEQ_START_TOKEN
) {
2308 if (sock_net(sk
) == seq_file_net(seq
))
2313 sk
= unix_from_bucket(seq
, pos
);
2318 bucket
= get_bucket(*pos
) + 1;
2319 *pos
= set_bucket_offset(bucket
, 1);
2320 } while (bucket
< ARRAY_SIZE(unix_socket_table
));
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}
= {
2451 .init
= unix_net_init
,
2452 .exit
= unix_net_exit
,
2455 static int __init
af_unix_init(void)
2459 BUILD_BUG_ON(sizeof(struct unix_skb_parms
) > FIELD_SIZEOF(struct sk_buff
, cb
));
2461 rc
= proto_register(&unix_proto
, 1);
2463 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__
);
2467 sock_register(&unix_family_ops
);
2468 register_pernet_subsys(&unix_net_ops
);
2473 static void __exit
af_unix_exit(void)
2475 sock_unregister(PF_UNIX
);
2476 proto_unregister(&unix_proto
);
2477 unregister_pernet_subsys(&unix_net_ops
);
/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);