/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko EiBfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid a huge amount
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair.
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first socket
 *		fstat twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS-based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  that start with 0, so that this name space does not intersect
 *		  with BSD names.
 */
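
/*
 * Illustrative sketch (not part of the original file): binding the kind of
 * abstract name described above from userspace.  The name "\0example" and
 * the descriptor "fd" are made up for the example.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	memcpy(a.sun_path, "\0example", 8);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 8);
 *
 * The leading NUL byte keeps the name out of the filesystem namespace, and
 * the supplied address length (not a terminating NUL) delimits the name.
 */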

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;


static struct hlist_head *unix_sockets_unbound(void *addr)
{
	unsigned long hash = (unsigned long)addr;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash %= UNIX_HASH_SIZE;
	return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)

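/*
 * Note on the table layout: the lower UNIX_HASH_SIZE buckets hold bound
 * sockets (hashed by name or by inode number), while unix_sockets_unbound()
 * spreads unbound sockets over the upper UNIX_HASH_SIZE buckets using a hash
 * of the socket pointer itself, so they do not all pile up on one chain.
 */
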
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 * SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
	unsigned int hash = (__force unsigned int)n;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash & (UNIX_HASH_SIZE - 1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

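/*
 * unix_may_send(): a datagram may be sent to "osk" only if it is unconnected
 * or connected back to the sender.  unix_recvq_full() reuses
 * sk_max_ack_backlog as the cap on a peer's receive queue length; senders
 * are throttled (or get -EAGAIN) once that queue exceeds the cap.
 */
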
struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 * Check unix socket name:
 *	- it should not be zero length.
 *	- if it starts with a non-zero byte, it should be NUL terminated (FS object)
 *	- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off by one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However in kernel space
		 * we are guaranteed that it is a valid memory location in our
		 * kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
	return len;
}

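/*
 * In other words: for a filesystem name the returned length is recomputed
 * from the NUL-terminated path (sun_family plus path plus NUL) and no hash is
 * produced here (the inode number is used later instead), while for an
 * abstract name the whole address, leading NUL included, is checksummed and
 * folded into a table slot via unix_hash_fold().
 */
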
static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	WARN_ON(!sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, int type, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (!net_eq(sock_net(s), net))
			continue;

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned int hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(net, sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	sk_for_each(s,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

static inline int unix_writable(struct sock *sk)
{
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

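/*
 * Because wmem_alloc is shifted left by two before the comparison, the socket
 * is reported writable only while the memory charged to in-flight writes
 * stays at or below a quarter of sk_sndbuf.
 */
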
static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				POLLOUT | POLLWRNORM | POLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, sk connected to peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct path path;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What the above comment does talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	put_pid(sk->sk_peer_pid);
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct pid *old_pid = NULL;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
	put_pid(old_pid);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
				    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t, int);

static void unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	mutex_lock(&u->readlock);
	sk->sk_peek_off = val;
	mutex_unlock(&u->readlock);
}


static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
};

static struct proto unix_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
	if (sk == NULL)
		atomic_long_dec(&unix_nr_socks);
	else {
		local_bh_disable();
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		local_bh_enable();
	}
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;
	unsigned int retries = 0;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/*
		 * __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();
		/* Give up if all names seem to be in use. */
		if (retries++ == 0xFFFFF) {
			err = -ENOSPC;
			kfree(addr);
			goto out;
		}
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}

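/*
 * Autobind generates an abstract address of the form "\0XXXXX", where XXXXX
 * is the five-hex-digit "ordernum" above, and keeps retrying (up to 0xFFFFF
 * attempts) until it finds a name that no other socket of the same type has
 * bound in this namespace.
 */
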
static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunname, int len,
				    int type, unsigned int hash, int *error)
{
	struct sock *u;
	struct path path;
	int err = 0;

	if (sunname->sun_path[0]) {
		struct inode *inode;
		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
		if (err)
			goto fail;
		inode = path.dentry->d_inode;
		err = inode_permission(inode, MAY_WRITE);
		if (err)
			goto put_fail;

		err = -ECONNREFUSED;
		if (!S_ISSOCK(inode->i_mode))
			goto put_fail;
		u = unix_find_socket_byinode(inode);
		if (!u)
			goto put_fail;

		if (u->sk_type == type)
			touch_atime(&path);

		path_put(&path);

		err = -EPROTOTYPE;
		if (u->sk_type != type) {
			sock_put(u);
			goto fail;
		}
	} else {
		err = -ECONNREFUSED;
		u = unix_find_socket_byname(net, sunname, len, type, hash);
		if (u) {
			struct dentry *dentry;
			dentry = unix_sk(u)->path.dentry;
			if (dentry)
				touch_atime(&unix_sk(u)->path);
		} else
			goto fail;
	}
	return u;

put_fail:
	path_put(&path);
fail:
	*error = err;
	return NULL;
}

static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
	struct dentry *dentry;
	struct path path;
	int err = 0;
	/*
	 * Get the parent directory, calculate the hash for last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
	err = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		return err;

	/*
	 * All right, let's create it.
	 */
	err = security_path_mknod(&path, dentry, mode, 0);
	if (!err) {
		err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
		if (!err) {
			res->mnt = mntget(path.mnt);
			res->dentry = dget(dentry);
		}
	}
	done_path_create(&path, dentry);
	return err;
}

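/*
 * unix_mknod() creates the filesystem object backing a bound socket with
 * vfs_mknod() (the mode includes S_IFSOCK); the caller, unix_bind(), turns an
 * -EEXIST result into -EADDRINUSE so userspace sees the usual bind() error.
 */
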
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	char *sun_path = sunaddr->sun_path;
	int err;
	unsigned int hash;
	struct unix_address *addr;
	struct hlist_head *list;

	err = -EINVAL;
	if (sunaddr->sun_family != AF_UNIX)
		goto out;

	if (addr_len == sizeof(short)) {
		err = unix_autobind(sock);
		goto out;
	}

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	mutex_lock(&u->readlock);

	err = -EINVAL;
	if (u->addr)
		goto out_up;

	err = -ENOMEM;
	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
	if (!addr)
		goto out_up;

	memcpy(addr->name, sunaddr, addr_len);
	addr->len = addr_len;
	addr->hash = hash ^ sk->sk_type;
	atomic_set(&addr->refcnt, 1);

	if (sun_path[0]) {
		struct path path;
		umode_t mode = S_IFSOCK |
		       (SOCK_INODE(sock)->i_mode & ~current_umask());
		err = unix_mknod(sun_path, mode, &path);
		if (err) {
			if (err == -EEXIST)
				err = -EADDRINUSE;
			unix_release_addr(addr);
			goto out_up;
		}
		addr->hash = UNIX_HASH_SIZE;
		hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
		spin_lock(&unix_table_lock);
		u->path = path;
		list = &unix_socket_table[hash];
	} else {
		spin_lock(&unix_table_lock);
		err = -EADDRINUSE;
		if (__unix_find_socket_byname(net, sunaddr, addr_len,
					      sk->sk_type, hash)) {
			unix_release_addr(addr);
			goto out_unlock;
		}

		list = &unix_socket_table[addr->hash];
	}

	err = 0;
	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(list, sk);

out_unlock:
	spin_unlock(&unix_table_lock);
out_up:
	mutex_unlock(&u->readlock);
out:
	return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}
	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}
	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

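/*
 * Lock-ordering note: when two distinct sockets must be locked, the one with
 * the lower pointer value is always taken first (the second via
 * unix_state_lock_nested()), which prevents an ABBA deadlock between two
 * tasks locking the same pair in opposite order.
 */
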
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *other;
	unsigned int hash;
	int err;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_mkname(sunaddr, alen, &hash);
		if (err < 0)
			goto out;
		alen = err;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
			goto out;

restart:
		other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
		if (!other)
			goto out;

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

	} else {
		/*
		 *	1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}
	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

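/*
 * Note that unix_wait_for_peer() is entered with other's state lock held and
 * drops it itself: the sleep condition (peer alive, not shut down, receive
 * queue still full) is sampled under the lock, and the caller re-takes the
 * lock when it restarts after the timeout or wakeup.
 */
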
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct sock *newsk = NULL;
	struct sock *other = NULL;
	struct sk_buff *skb = NULL;
	unsigned int hash;
	int st;
	int err;
	long timeo;

	err = unix_mkname(sunaddr, addr_len, &hash);
	if (err < 0)
		goto out;
	addr_len = err;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
	    (err = unix_autobind(sock)) != 0)
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	   If we will make it after state is locked,
	   we will have to recheck all again in any case.
	 */

	err = -ENOMEM;

	/* create new sock for complete connection */
	newsk = unix_create1(sock_net(sk), NULL);
	if (newsk == NULL)
		goto out;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
	if (!other)
		goto out;

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.

	   This is a tricky place. We need to grab our state lock and cannot
	   drop lock on peer. It is dangerous because deadlock is
	   possible. Connect to self case and simultaneous
	   attempt to connect are eliminated by checking socket
	   state. other is TCP_LISTEN, if sk is TCP_LISTEN we
	   check this before attempt to grab lock.

	   Well, and we have to recheck the state after socket locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock */
	if (otheru->addr) {
		atomic_inc(&otheru->addr->refcnt);
		newu->addr = otheru->addr;
	}
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic_inc();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other, 0);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	if (ska->sk_type != SOCK_DGRAM) {
		ska->sk_state = TCP_ESTABLISHED;
		skb->sk_state = TCP_ESTABLISHED;
		socka->state = SS_CONNECTED;
		sockb->state = SS_CONNECTED;
	}
	return 0;
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}


static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	u = unix_sk(sk);
	unix_state_lock(sk);
	if (!u->addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		*uaddr_len = sizeof(short);
	} else {
		struct unix_address *addr = u->addr;

		*uaddr_len = addr->len;
		memcpy(sunaddr, addr->name, *uaddr_len);
	}
	unix_state_unlock(sk);
	sock_put(sk);
out:
	return err;
}

static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	scm->fp = UNIXCB(skb).fp;
	UNIXCB(skb).fp = NULL;

	for (i = scm->fp->count-1; i >= 0; i--)
		unix_notinflight(scm->fp->fp[i]);
}

static void unix_destruct_scm(struct sk_buff *skb)
{
	struct scm_cookie scm;
	memset(&scm, 0, sizeof(scm));
	scm.pid  = UNIXCB(skb).pid;
	if (UNIXCB(skb).fp)
		unix_detach_fds(&scm, skb);

	/* Alas, it calls VFS */
	/* So fscking what? fput() had been SMP-safe since the last Summer */
	scm_destroy(&scm);
	sock_wfree(skb);
}

#define MAX_RECURSION_LEVEL 4

static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;
	unsigned char max_level = 0;
	int unix_sock_count = 0;

	for (i = scm->fp->count - 1; i >= 0; i--) {
		struct sock *sk = unix_get_socket(scm->fp->fp[i]);

		if (sk) {
			unix_sock_count++;
			max_level = max(max_level,
					unix_sk(sk)->recursion_level);
		}
	}
	if (unlikely(max_level > MAX_RECURSION_LEVEL))
		return -ETOOMANYREFS;

	/*
	 * Need to duplicate file references for the sake of garbage
	 * collection. Otherwise a socket in the fps might become a
	 * candidate for GC while the skb is not yet queued.
	 */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	if (unix_sock_count) {
		for (i = scm->fp->count - 1; i >= 0; i--)
			unix_inflight(scm->fp->fp[i]);
	}
	return max_level;
}

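/*
 * unix_attach_fds() is where SCM_RIGHTS descriptors get pinned: each passed
 * AF_UNIX socket is marked in-flight for the garbage collector, and the
 * recursion level (sockets carried inside sockets) is capped at
 * MAX_RECURSION_LEVEL, returning -ETOOMANYREFS beyond that.
 */
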
f78a5fda 1388static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
7361c36c
EB
1389{
1390 int err = 0;
16e57262 1391
f78a5fda 1392 UNIXCB(skb).pid = get_pid(scm->pid);
6b0ee8c0
EB
1393 UNIXCB(skb).uid = scm->creds.uid;
1394 UNIXCB(skb).gid = scm->creds.gid;
7361c36c
EB
1395 UNIXCB(skb).fp = NULL;
1396 if (scm->fp && send_fds)
1397 err = unix_attach_fds(scm, skb);
1398
1399 skb->destructor = unix_destruct_scm;
1400 return err;
1401}
1402
16e57262
ED
1403/*
1404 * Some apps rely on write() giving SCM_CREDENTIALS
1405 * We include credentials if source or destination socket
1406 * asserted SOCK_PASSCRED.
1407 */
1408static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1409 const struct sock *other)
1410{
6b0ee8c0 1411 if (UNIXCB(skb).pid)
16e57262
ED
1412 return;
1413 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
25da0e3e
EB
1414 !other->sk_socket ||
1415 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
16e57262 1416 UNIXCB(skb).pid = get_pid(task_tgid(current));
6b0ee8c0 1417 current_euid_egid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
16e57262
ED
1418 }
1419}
1420
1da177e4
LT
1421/*
1422 * Send AF_UNIX data.
1423 */
1424
1425static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1426 struct msghdr *msg, size_t len)
1427{
1428 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1429 struct sock *sk = sock->sk;
3b1e0a65 1430 struct net *net = sock_net(sk);
1da177e4 1431 struct unix_sock *u = unix_sk(sk);
e27dfcea 1432 struct sockaddr_un *sunaddr = msg->msg_name;
1da177e4
LT
1433 struct sock *other = NULL;
1434 int namelen = 0; /* fake GCC */
1435 int err;
95c96174 1436 unsigned int hash;
f78a5fda 1437 struct sk_buff *skb;
1da177e4
LT
1438 long timeo;
1439 struct scm_cookie tmp_scm;
25888e30 1440 int max_level;
eb6a2481 1441 int data_len = 0;
1da177e4
LT
1442
1443 if (NULL == siocb->scm)
1444 siocb->scm = &tmp_scm;
5f23b734 1445 wait_for_unix_gc();
e0e3cea4 1446 err = scm_send(sock, msg, siocb->scm, false);
1da177e4
LT
1447 if (err < 0)
1448 return err;
1449
1450 err = -EOPNOTSUPP;
1451 if (msg->msg_flags&MSG_OOB)
1452 goto out;
1453
1454 if (msg->msg_namelen) {
1455 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1456 if (err < 0)
1457 goto out;
1458 namelen = err;
1459 } else {
1460 sunaddr = NULL;
1461 err = -ENOTCONN;
1462 other = unix_peer_get(sk);
1463 if (!other)
1464 goto out;
1465 }
1466
f64f9e71
JP
1467 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1468 && (err = unix_autobind(sock)) != 0)
1da177e4
LT
1469 goto out;
1470
1471 err = -EMSGSIZE;
1472 if (len > sk->sk_sndbuf - 32)
1473 goto out;
1474
eb6a2481
ED
1475 if (len > SKB_MAX_ALLOC)
1476 data_len = min_t(size_t,
1477 len - SKB_MAX_ALLOC,
1478 MAX_SKB_FRAGS * PAGE_SIZE);
1479
1480 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1481 msg->msg_flags & MSG_DONTWAIT, &err);
e27dfcea 1482 if (skb == NULL)
1da177e4
LT
1483 goto out;
1484
f78a5fda 1485 err = unix_scm_to_skb(siocb->scm, skb, true);
25888e30 1486 if (err < 0)
7361c36c 1487 goto out_free;
25888e30 1488 max_level = err + 1;
dc49c1f9 1489 unix_get_secdata(siocb->scm, skb);
877ce7c1 1490
eb6a2481
ED
1491 skb_put(skb, len - data_len);
1492 skb->data_len = data_len;
1493 skb->len = len;
1494 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1da177e4
LT
1495 if (err)
1496 goto out_free;
1497
1498 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1499
1500restart:
1501 if (!other) {
1502 err = -ECONNRESET;
1503 if (sunaddr == NULL)
1504 goto out_free;
1505
097e66c5 1506 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1da177e4 1507 hash, &err);
e27dfcea 1508 if (other == NULL)
1da177e4
LT
1509 goto out_free;
1510 }
1511
d6ae3bae
AC
1512 if (sk_filter(other, skb) < 0) {
1513 /* Toss the packet but do not return any error to the sender */
1514 err = len;
1515 goto out_free;
1516 }
1517
1c92b4e5 1518 unix_state_lock(other);
1da177e4
LT
1519 err = -EPERM;
1520 if (!unix_may_send(sk, other))
1521 goto out_unlock;
1522
1523 if (sock_flag(other, SOCK_DEAD)) {
1524 /*
1525 * Check with 1003.1g - what should
1526 * datagram error
1527 */
1c92b4e5 1528 unix_state_unlock(other);
1da177e4
LT
1529 sock_put(other);
1530
1531 err = 0;
1c92b4e5 1532 unix_state_lock(sk);
1da177e4 1533 if (unix_peer(sk) == other) {
e27dfcea 1534 unix_peer(sk) = NULL;
1c92b4e5 1535 unix_state_unlock(sk);
1da177e4
LT
1536
1537 unix_dgram_disconnected(sk, other);
1538 sock_put(other);
1539 err = -ECONNREFUSED;
1540 } else {
1c92b4e5 1541 unix_state_unlock(sk);
1da177e4
LT
1542 }
1543
1544 other = NULL;
1545 if (err)
1546 goto out_free;
1547 goto restart;
1548 }
1549
1550 err = -EPIPE;
1551 if (other->sk_shutdown & RCV_SHUTDOWN)
1552 goto out_unlock;
1553
1554 if (sk->sk_type != SOCK_SEQPACKET) {
1555 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1556 if (err)
1557 goto out_unlock;
1558 }
1559
3c73419c 1560 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1da177e4
LT
1561 if (!timeo) {
1562 err = -EAGAIN;
1563 goto out_unlock;
1564 }
1565
1566 timeo = unix_wait_for_peer(other, timeo);
1567
1568 err = sock_intr_errno(timeo);
1569 if (signal_pending(current))
1570 goto out_free;
1571
1572 goto restart;
1573 }
1574
3f66116e
AC
1575 if (sock_flag(other, SOCK_RCVTSTAMP))
1576 __net_timestamp(skb);
16e57262 1577 maybe_add_creds(skb, sock, other);
1da177e4 1578 skb_queue_tail(&other->sk_receive_queue, skb);
25888e30
ED
1579 if (max_level > unix_sk(other)->recursion_level)
1580 unix_sk(other)->recursion_level = max_level;
1c92b4e5 1581 unix_state_unlock(other);
1da177e4
LT
1582 other->sk_data_ready(other, len);
1583 sock_put(other);
f78a5fda 1584 scm_destroy(siocb->scm);
1da177e4
LT
1585 return len;
1586
1587out_unlock:
1c92b4e5 1588 unix_state_unlock(other);
1da177e4
LT
1589out_free:
1590 kfree_skb(skb);
1591out:
1592 if (other)
1593 sock_put(other);
f78a5fda 1594 scm_destroy(siocb->scm);
1da177e4
LT
1595 return err;
1596}
1597
ac7bfa62 1598
1da177e4
LT
1599static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1600 struct msghdr *msg, size_t len)
1601{
1602 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1603 struct sock *sk = sock->sk;
1604 struct sock *other = NULL;
6eba6a37 1605 int err, size;
f78a5fda 1606 struct sk_buff *skb;
e27dfcea 1607 int sent = 0;
1da177e4 1608 struct scm_cookie tmp_scm;
8ba69ba6 1609 bool fds_sent = false;
25888e30 1610 int max_level;
1da177e4
LT
1611
1612 if (NULL == siocb->scm)
1613 siocb->scm = &tmp_scm;
5f23b734 1614 wait_for_unix_gc();
e0e3cea4 1615 err = scm_send(sock, msg, siocb->scm, false);
1da177e4
LT
1616 if (err < 0)
1617 return err;
1618
1619 err = -EOPNOTSUPP;
1620 if (msg->msg_flags&MSG_OOB)
1621 goto out_err;
1622
1623 if (msg->msg_namelen) {
1624 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1625 goto out_err;
1626 } else {
1da177e4 1627 err = -ENOTCONN;
830a1e5c 1628 other = unix_peer(sk);
1da177e4
LT
1629 if (!other)
1630 goto out_err;
1631 }
1632
1633 if (sk->sk_shutdown & SEND_SHUTDOWN)
1634 goto pipe_err;
1635
6eba6a37 1636 while (sent < len) {
1da177e4 1637 /*
e9df7d7f
BL
1638 * Optimisation for the fact that under 0.01% of X
1639 * messages typically need breaking up.
1da177e4
LT
1640 */
1641
e9df7d7f 1642 size = len-sent;
1da177e4
LT
1643
1644 /* Keep two messages in the pipe so it schedules better */
e9df7d7f
BL
1645 if (size > ((sk->sk_sndbuf >> 1) - 64))
1646 size = (sk->sk_sndbuf >> 1) - 64;
1da177e4
LT
1647
1648 if (size > SKB_MAX_ALLOC)
1649 size = SKB_MAX_ALLOC;
ac7bfa62 1650
1da177e4
LT
1651 /*
1652 * Grab a buffer
1653 */
ac7bfa62 1654
6eba6a37
ED
1655 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1656 &err);
1da177e4 1657
e27dfcea 1658 if (skb == NULL)
1da177e4
LT
1659 goto out_err;
1660
1661 /*
1662 * If you pass two values to the sock_alloc_send_skb
1663 * it tries to grab the large buffer with GFP_NOFS
1664 * (which can fail easily), and if it fails grab the
1665 * fallback size buffer which is under a page and will
1666 * succeed. [Alan]
1667 */
1668 size = min_t(int, size, skb_tailroom(skb));
1669
7361c36c 1670
f78a5fda
DM
1671 /* Only send the fds in the first buffer */
1672 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
25888e30 1673 if (err < 0) {
7361c36c 1674 kfree_skb(skb);
f78a5fda 1675 goto out_err;
6209344f 1676 }
25888e30 1677 max_level = err + 1;
7361c36c 1678 fds_sent = true;
1da177e4 1679
6eba6a37
ED
1680 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1681 if (err) {
1da177e4 1682 kfree_skb(skb);
f78a5fda 1683 goto out_err;
1da177e4
LT
1684 }
1685
1c92b4e5 1686 unix_state_lock(other);
1da177e4
LT
1687
1688 if (sock_flag(other, SOCK_DEAD) ||
1689 (other->sk_shutdown & RCV_SHUTDOWN))
1690 goto pipe_err_free;
1691
16e57262 1692 maybe_add_creds(skb, sock, other);
1da177e4 1693 skb_queue_tail(&other->sk_receive_queue, skb);
25888e30
ED
1694 if (max_level > unix_sk(other)->recursion_level)
1695 unix_sk(other)->recursion_level = max_level;
1c92b4e5 1696 unix_state_unlock(other);
1da177e4 1697 other->sk_data_ready(other, size);
e27dfcea 1698 sent += size;
1da177e4 1699 }
1da177e4 1700
f78a5fda 1701 scm_destroy(siocb->scm);
1da177e4
LT
1702 siocb->scm = NULL;
1703
1704 return sent;
1705
1706pipe_err_free:
1c92b4e5 1707 unix_state_unlock(other);
1da177e4
LT
1708 kfree_skb(skb);
1709pipe_err:
6eba6a37
ED
1710 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1711 send_sig(SIGPIPE, current, 0);
1da177e4
LT
1712 err = -EPIPE;
1713out_err:
f78a5fda 1714 scm_destroy(siocb->scm);
1da177e4
LT
1715 siocb->scm = NULL;
1716 return sent ? : err;
1717}
1718
1719static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1720 struct msghdr *msg, size_t len)
1721{
1722 int err;
1723 struct sock *sk = sock->sk;
ac7bfa62 1724
1da177e4
LT
1725 err = sock_error(sk);
1726 if (err)
1727 return err;
1728
1729 if (sk->sk_state != TCP_ESTABLISHED)
1730 return -ENOTCONN;
1731
1732 if (msg->msg_namelen)
1733 msg->msg_namelen = 0;
1734
1735 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1736}
ac7bfa62 1737
a05d2ad1
EB
1738static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1739 struct msghdr *msg, size_t size,
1740 int flags)
1741{
1742 struct sock *sk = sock->sk;
1743
1744 if (sk->sk_state != TCP_ESTABLISHED)
1745 return -ENOTCONN;
1746
1747 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1748}
1749
1da177e4
LT
1750static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1751{
1752 struct unix_sock *u = unix_sk(sk);
1753
1754 msg->msg_namelen = 0;
1755 if (u->addr) {
1756 msg->msg_namelen = u->addr->len;
1757 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1758 }
1759}
1760
1761static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1762 struct msghdr *msg, size_t size,
1763 int flags)
1764{
1765 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1766 struct scm_cookie tmp_scm;
1767 struct sock *sk = sock->sk;
1768 struct unix_sock *u = unix_sk(sk);
1769 int noblock = flags & MSG_DONTWAIT;
1770 struct sk_buff *skb;
1771 int err;
f55bb7f9 1772 int peeked, skip;
1da177e4
LT
1773
1774 err = -EOPNOTSUPP;
1775 if (flags&MSG_OOB)
1776 goto out;
1777
1778 msg->msg_namelen = 0;
1779
b3ca9b02
RW
1780 err = mutex_lock_interruptible(&u->readlock);
1781 if (err) {
1782 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1783 goto out;
1784 }
1da177e4 1785
f55bb7f9
PE
1786 skip = sk_peek_offset(sk, flags);
1787
1788 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
0a112258
FZ
1789 if (!skb) {
1790 unix_state_lock(sk);
1791 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1792 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1793 (sk->sk_shutdown & RCV_SHUTDOWN))
1794 err = 0;
1795 unix_state_unlock(sk);
1da177e4 1796 goto out_unlock;
0a112258 1797 }
1da177e4 1798
67426b75
ED
1799 wake_up_interruptible_sync_poll(&u->peer_wait,
1800 POLLOUT | POLLWRNORM | POLLWRBAND);
1da177e4
LT
1801
1802 if (msg->msg_name)
1803 unix_copy_addr(msg, skb->sk);
1804
f55bb7f9
PE
1805 if (size > skb->len - skip)
1806 size = skb->len - skip;
1807 else if (size < skb->len - skip)
1da177e4
LT
1808 msg->msg_flags |= MSG_TRUNC;
1809
f55bb7f9 1810 err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
1da177e4
LT
1811 if (err)
1812 goto out_free;
1813
3f66116e
AC
1814 if (sock_flag(sk, SOCK_RCVTSTAMP))
1815 __sock_recv_timestamp(msg, sk, skb);
1816
1da177e4
LT
1817 if (!siocb->scm) {
1818 siocb->scm = &tmp_scm;
1819 memset(&tmp_scm, 0, sizeof(tmp_scm));
1820 }
6b0ee8c0 1821 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
877ce7c1 1822 unix_set_secdata(siocb->scm, skb);
1da177e4 1823
6eba6a37 1824 if (!(flags & MSG_PEEK)) {
1da177e4
LT
1825 if (UNIXCB(skb).fp)
1826 unix_detach_fds(siocb->scm, skb);
f55bb7f9
PE
1827
1828 sk_peek_offset_bwd(sk, skb->len);
6eba6a37 1829 } else {
1da177e4
LT
1830 /* It is questionable: on PEEK we could:
1831 - do not return fds - good, but too simple 8)
1832 - return fds, and do not return them on read (old strategy,
1833 apparently wrong)
1834 - clone fds (I chose it for now, it is the most universal
1835 solution)
ac7bfa62
YH
1836
1837 POSIX 1003.1g does not actually define this clearly
1838 at all. POSIX 1003.1g doesn't define a lot of things
1839 clearly however!
1840
1da177e4 1841 */
f55bb7f9
PE
1842
1843 sk_peek_offset_fwd(sk, size);
1844
1da177e4
LT
1845 if (UNIXCB(skb).fp)
1846 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1847 }
9f6f9af7 1848 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1da177e4
LT
1849
1850 scm_recv(sock, msg, siocb->scm, flags);
1851
1852out_free:
6eba6a37 1853 skb_free_datagram(sk, skb);
1da177e4 1854out_unlock:
57b47a53 1855 mutex_unlock(&u->readlock);
1da177e4
LT
1856out:
1857 return err;
1858}

/*
 *	Sleep until data has arrived, but check for races.
 */

static long unix_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}
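
/*
 * The stream receive path below peeks at the head of sk_receive_queue,
 * copies up to "size" bytes out of it (honouring any SO_PEEK_OFF skip),
 * and only unlinks an skb once a non-PEEK read has fully consumed it.
 * While the queue is empty and fewer than the SO_RCVLOWAT target bytes
 * have been copied, it drops u->readlock and blocks in
 * unix_stream_data_wait() above, which returns the remaining timeout as
 * soon as data arrives, an error is queued, the peer shuts down, a
 * signal is pending, or the timeout expires.
 */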

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	msg->msg_namelen = 0;

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_toiovec().
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}

	skip = sk_peek_offset(sk, flags);

	do {
		int chunk;
		struct sk_buff *skb;

		unix_state_lock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
again:
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo);

			if (signal_pending(current)
			    || mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}

		if (skip >= skb->len) {
			skip -= skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, skb->len - skip, size);
		if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			skb_pull(skb, chunk);

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			if (skb->len)
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable; see the note in
			 * unix_dgram_recvmsg().
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}
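
/*
 * A minimal userspace sketch of what the SOCK_PASSCRED handling above
 * looks like to a receiver.  It assumes _GNU_SOURCE, <sys/socket.h>, a
 * connected AF_UNIX stream socket fd, and omits error handling:
 *
 *	int on = 1;
 *	char data[256], cbuf[CMSG_SPACE(sizeof(struct ucred))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	recvmsg(fd, &mh, 0);
 *	for (cm = CMSG_FIRSTHDR(&mh); cm; cm = CMSG_NXTHDR(&mh, cm))
 *		if (cm->cmsg_level == SOL_SOCKET &&
 *		    cm->cmsg_type == SCM_CREDENTIALS) {
 *			struct ucred *uc = (struct ucred *)CMSG_DATA(cm);
 *			// uc->pid, uc->uid, uc->gid identify the sender.
 *			// A single recvmsg() never mixes data from writers
 *			// with different credentials (see check_creds above).
 *		}
 */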

static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
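
/*
 * A minimal userspace sketch of how the peer-side mirroring above makes a
 * half-close visible at the other end of a stream pair.  It assumes
 * socketpair(AF_UNIX, SOCK_STREAM, 0, sv) and omits error handling:
 *
 *	char c;
 *
 *	shutdown(sv[0], SHUT_WR);  // sets SEND_SHUTDOWN on sv[0] and
 *	                           // RCV_SHUTDOWN on sv[1]
 *	read(sv[1], &c, 1);        // returns 0: EOF, no more data will arrive
 *	write(sv[1], "x", 1);      // still allowed: only one direction was
 *	                           // shut down
 */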

long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += skb->len;
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
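
/*
 * A minimal userspace sketch of querying the queue sizes handled above.
 * It assumes <sys/ioctl.h> and <linux/sockios.h>, an AF_UNIX socket fd,
 * and omits error handling:
 *
 *	int unread, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);   // bytes queued for reading; for
 *	                               // SOCK_STREAM/SOCK_SEQPACKET this is
 *	                               // the sum over all queued skbs, for
 *	                               // datagrams the size of the next one
 *	ioctl(fd, SIOCOUTQ, &unsent);  // send-buffer memory still in flight
 *	                               // (sk_wmem_alloc), i.e. not yet
 *	                               // consumed by the receiver
 */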

static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * We set writable also when the other side has shut down the
	 * connection.  This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
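
/*
 * Datagram and seqpacket sockets additionally gate writability on the
 * peer: if this socket is connected to a peer that is not connected back
 * to it (e.g. a datagram client talking to a bound server) and that
 * peer's receive queue is full, unix_dgram_poll() below withholds
 * POLLOUT and also registers the poller on the peer's peer_wait queue,
 * so the caller is woken once the peer reads and space frees up (see the
 * wake_up_interruptible_sync_poll() in unix_dgram_recvmsg()).  The write
 * checks are skipped entirely when the caller did not request
 * POLLOUT/POLLWRNORM/POLLWRBAND.
 */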

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

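/*
 * The seq_file position *pos packs a hash-bucket index into its high
 * bits and a 1-based offset within that bucket into its low BUCKET_SPACE
 * bits, so iteration over /proc/net/unix can resume at the right socket
 * between reads without rescanning earlier buckets.  The macros above
 * reserve the top (UNIX_HASH_BITS + 1) + 1 bits of the signed position
 * for the bucket; as an illustration, assuming UNIX_HASH_BITS is 8 on a
 * 64-bit kernel, BUCKET_SPACE is 64 - 9 - 1 = 54 offset bits.
 */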
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	struct sock *sk;
	unsigned long count = 0;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
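
/*
 * Each bound or connected socket therefore appears in /proc/net/unix as
 * one line in the format printed above, e.g. (illustrative values):
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	ffff8800b8d456c0: 00000002 00000000 00010000 0001 01 16163 @/tmp/.X11-unix/X0
 *
 * Flags is __SO_ACCEPTCON (00010000) for listening sockets, Type is the
 * socket type (0001 == SOCK_STREAM), St is the SS_* socket state, and a
 * leading '@' marks an abstract (non-filesystem) address.
 */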

static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};

static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket.  But later than subsys_initcall() because
   we depend on stuff initialised there. */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);