af_unix: dont send SCM_CREDENTIALS by default
net/unix/af_unix.c
1/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
 32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
 33 * by the above two patches.
 34 * Andrea Arcangeli : If possible we block in connect(2)
 35 * if the max backlog of the listen socket
 36 * has been reached. This won't break
 37 * old apps and it will avoid a huge amount
 38 * of socks being hashed (this is for unix_gc()
 39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
82
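/*
 * Illustrative userspace sketch (not part of af_unix.c): how the two name
 * spaces described above look from the caller's side.  The socket name
 * "example_socket" and the helper bind_example() are made up for the example.
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int bind_example(int use_abstract)
{
        struct sockaddr_un addr;
        socklen_t len;
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        if (use_abstract) {
                /* Abstract name: starts with a 0 byte and is not NUL
                 * terminated; the address length tells the kernel how many
                 * bytes of the name are significant. */
                memcpy(addr.sun_path + 1, "example_socket", 14);
                len = offsetof(struct sockaddr_un, sun_path) + 1 + 14;
        } else {
                /* Filesystem name: an ordinary NUL-terminated path. */
                strcpy(addr.sun_path, "/tmp/example_socket");
                len = sizeof(addr);
        }
        if (bind(fd, (struct sockaddr *)&addr, len) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}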
83#include <linux/module.h>
 84#include <linux/kernel.h>
85#include <linux/signal.h>
86#include <linux/sched.h>
87#include <linux/errno.h>
88#include <linux/string.h>
89#include <linux/stat.h>
90#include <linux/dcache.h>
91#include <linux/namei.h>
92#include <linux/socket.h>
93#include <linux/un.h>
94#include <linux/fcntl.h>
95#include <linux/termios.h>
96#include <linux/sockios.h>
97#include <linux/net.h>
98#include <linux/in.h>
99#include <linux/fs.h>
100#include <linux/slab.h>
101#include <asm/uaccess.h>
102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
 104#include <net/net_namespace.h>
 105#include <net/sock.h>
 106#include <net/tcp_states.h>
107#include <net/af_unix.h>
108#include <linux/proc_fs.h>
109#include <linux/seq_file.h>
110#include <net/scm.h>
111#include <linux/init.h>
112#include <linux/poll.h>
113#include <linux/rtnetlink.h>
114#include <linux/mount.h>
115#include <net/checksum.h>
116#include <linux/security.h>
117
 118static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
 119static DEFINE_SPINLOCK(unix_table_lock);
 120static atomic_long_t unix_nr_socks;
121
122#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
 126#ifdef CONFIG_SECURITY_NETWORK
 127static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 128{
 129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
 130}
 131
 132static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 133{
 134 scm->secid = *UNIXSID(skb);
 135}
 136#else
 137static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 138{ }
 139
 140static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
 141{ }
 142#endif /* CONFIG_SECURITY_NETWORK */
 143
 144/*
 145 * SMP locking strategy:
 146 * hash table is protected with spinlock unix_table_lock
 147 * each socket state is protected by separate spin lock.
 148 */
149
44bb9363 150static inline unsigned unix_hash_fold(__wsum n)
1da177e4 151{
44bb9363 152 unsigned hash = (__force unsigned)n;
1da177e4
LT
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156}
157
158#define unix_peer(sk) (unix_sk(sk)->peer)
159
160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161{
162 return unix_peer(osk) == sk;
163}
164
165static inline int unix_may_send(struct sock *sk, struct sock *osk)
166{
6eba6a37 167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
1da177e4
LT
168}
169
3c73419c
RW
170static inline int unix_recvq_full(struct sock const *sk)
171{
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173}
174
1da177e4
LT
175static struct sock *unix_peer_get(struct sock *s)
176{
177 struct sock *peer;
178
1c92b4e5 179 unix_state_lock(s);
1da177e4
LT
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
1c92b4e5 183 unix_state_unlock(s);
1da177e4
LT
184 return peer;
185}
186
187static inline void unix_release_addr(struct unix_address *addr)
188{
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191}
192
193/*
194 * Check unix socket name:
 195 * - should not be zero length.
 196 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
 197 * - if it starts with a zero byte, it is an abstract name.
198 */
ac7bfa62 199
6eba6a37 200static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
1da177e4
LT
201{
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
25985edc 210 * sun_path[108] doesn't as such exist. However in kernel space
1da177e4
LT
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
e27dfcea 214 ((char *)sunaddr)[len] = 0;
1da177e4
LT
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
07f0757a 219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
1da177e4
LT
220 return len;
221}
222
223static void __unix_remove_socket(struct sock *sk)
224{
225 sk_del_node_init(sk);
226}
227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{
547b792c 230 WARN_ON(!sk_unhashed(sk));
1da177e4
LT
231 sk_add_node(sk, list);
232}
233
234static inline void unix_remove_socket(struct sock *sk)
235{
fbe9cc4a 236 spin_lock(&unix_table_lock);
1da177e4 237 __unix_remove_socket(sk);
fbe9cc4a 238 spin_unlock(&unix_table_lock);
1da177e4
LT
239}
240
241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242{
fbe9cc4a 243 spin_lock(&unix_table_lock);
1da177e4 244 __unix_insert_socket(list, sk);
fbe9cc4a 245 spin_unlock(&unix_table_lock);
1da177e4
LT
246}
247
097e66c5
DL
248static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
1da177e4
LT
250 int len, int type, unsigned hash)
251{
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
878628fb 258 if (!net_eq(sock_net(s), net))
097e66c5
DL
259 continue;
260
1da177e4
LT
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266found:
267 return s;
268}
269
097e66c5
DL
270static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
1da177e4
LT
272 int len, int type,
273 unsigned hash)
274{
275 struct sock *s;
276
fbe9cc4a 277 spin_lock(&unix_table_lock);
097e66c5 278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
1da177e4
LT
279 if (s)
280 sock_hold(s);
fbe9cc4a 281 spin_unlock(&unix_table_lock);
1da177e4
LT
282 return s;
283}
284
6616f788 285static struct sock *unix_find_socket_byinode(struct inode *i)
1da177e4
LT
286{
287 struct sock *s;
288 struct hlist_node *node;
289
fbe9cc4a 290 spin_lock(&unix_table_lock);
1da177e4
LT
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
6eba6a37 295 if (dentry && dentry->d_inode == i) {
1da177e4
LT
296 sock_hold(s);
297 goto found;
298 }
299 }
300 s = NULL;
301found:
fbe9cc4a 302 spin_unlock(&unix_table_lock);
1da177e4
LT
303 return s;
304}
305
306static inline int unix_writable(struct sock *sk)
307{
308 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
309}
310
311static void unix_write_space(struct sock *sk)
312{
43815482
ED
313 struct socket_wq *wq;
314
315 rcu_read_lock();
1da177e4 316 if (unix_writable(sk)) {
43815482
ED
317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq))
67426b75
ED
319 wake_up_interruptible_sync_poll(&wq->wait,
320 POLLOUT | POLLWRNORM | POLLWRBAND);
8d8ad9d7 321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1da177e4 322 }
43815482 323 rcu_read_unlock();
1da177e4
LT
324}
325
 326/* When a dgram socket disconnects (or changes its peer), we clear its receive
 327 * queue of packets that arrived from the previous peer. First, it allows us to do
 328 * flow control based only on wmem_alloc; second, an sk connected to a peer
 329 * may receive messages only from that peer. */
330static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331{
b03efcfb 332 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1da177e4
LT
333 skb_queue_purge(&sk->sk_receive_queue);
334 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
 336 /* If one link of a bidirectional dgram pipe is disconnected,
 337 * we signal an error. Messages are lost. Do not do this
 338 * when the peer was not connected to us.
 339 */
340 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341 other->sk_err = ECONNRESET;
342 other->sk_error_report(other);
343 }
344 }
345}
346
347static void unix_sock_destructor(struct sock *sk)
348{
349 struct unix_sock *u = unix_sk(sk);
350
351 skb_queue_purge(&sk->sk_receive_queue);
352
547b792c
IJ
353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 WARN_ON(!sk_unhashed(sk));
355 WARN_ON(sk->sk_socket);
1da177e4 356 if (!sock_flag(sk, SOCK_DEAD)) {
6b41e7dd 357 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
1da177e4
LT
358 return;
359 }
360
361 if (u->addr)
362 unix_release_addr(u->addr);
363
518de9b3 364 atomic_long_dec(&unix_nr_socks);
6f756a8c 365 local_bh_disable();
a8076d8d 366 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
6f756a8c 367 local_bh_enable();
1da177e4 368#ifdef UNIX_REFCNT_DEBUG
518de9b3
ED
369 printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
370 atomic_long_read(&unix_nr_socks));
1da177e4
LT
371#endif
372}
373
6eba6a37 374static int unix_release_sock(struct sock *sk, int embrion)
1da177e4
LT
375{
376 struct unix_sock *u = unix_sk(sk);
377 struct dentry *dentry;
378 struct vfsmount *mnt;
379 struct sock *skpair;
380 struct sk_buff *skb;
381 int state;
382
383 unix_remove_socket(sk);
384
385 /* Clear state */
1c92b4e5 386 unix_state_lock(sk);
1da177e4
LT
387 sock_orphan(sk);
388 sk->sk_shutdown = SHUTDOWN_MASK;
389 dentry = u->dentry;
390 u->dentry = NULL;
391 mnt = u->mnt;
392 u->mnt = NULL;
393 state = sk->sk_state;
394 sk->sk_state = TCP_CLOSE;
1c92b4e5 395 unix_state_unlock(sk);
1da177e4
LT
396
397 wake_up_interruptible_all(&u->peer_wait);
398
e27dfcea 399 skpair = unix_peer(sk);
1da177e4 400
e27dfcea 401 if (skpair != NULL) {
1da177e4 402 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
1c92b4e5 403 unix_state_lock(skpair);
1da177e4
LT
404 /* No more writes */
405 skpair->sk_shutdown = SHUTDOWN_MASK;
406 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
407 skpair->sk_err = ECONNRESET;
1c92b4e5 408 unix_state_unlock(skpair);
1da177e4 409 skpair->sk_state_change(skpair);
8d8ad9d7 410 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
1da177e4
LT
411 }
412 sock_put(skpair); /* It may now die */
413 unix_peer(sk) = NULL;
414 }
415
416 /* Try to flush out this socket. Throw out buffers at least */
417
418 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
e27dfcea 419 if (state == TCP_LISTEN)
1da177e4
LT
420 unix_release_sock(skb->sk, 1);
421 /* passed fds are erased in the kfree_skb hook */
422 kfree_skb(skb);
423 }
424
425 if (dentry) {
426 dput(dentry);
427 mntput(mnt);
428 }
429
430 sock_put(sk);
431
432 /* ---- Socket is dead now and most probably destroyed ---- */
433
434 /*
 435 * Fixme: BSD difference: In BSD all sockets connected to us get
436 * ECONNRESET and we die on the spot. In Linux we behave
437 * like files and pipes do and wait for the last
438 * dereference.
439 *
440 * Can't we simply set sock->err?
441 *
 442 * What does the above comment talk about? --ANK(980817)
443 */
444
9305cfa4 445 if (unix_tot_inflight)
ac7bfa62 446 unix_gc(); /* Garbage collect fds */
1da177e4
LT
447
448 return 0;
449}
450
109f6e39
EB
451static void init_peercred(struct sock *sk)
452{
453 put_pid(sk->sk_peer_pid);
454 if (sk->sk_peer_cred)
455 put_cred(sk->sk_peer_cred);
456 sk->sk_peer_pid = get_pid(task_tgid(current));
457 sk->sk_peer_cred = get_current_cred();
458}
459
460static void copy_peercred(struct sock *sk, struct sock *peersk)
461{
462 put_pid(sk->sk_peer_pid);
463 if (sk->sk_peer_cred)
464 put_cred(sk->sk_peer_cred);
465 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
466 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
467}
468
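/*
 * Illustrative userspace sketch (not part of af_unix.c): the pid and
 * credentials captured by init_peercred()/copy_peercred() are what
 * SO_PEERCRED later reports on the other end of the connection.  The
 * helper name peer_pid() is made up for the example.
 */
#define _GNU_SOURCE             /* for struct ucred */
#include <sys/socket.h>

static int peer_pid(int connected_unix_fd)
{
        struct ucred peer;
        socklen_t len = sizeof(peer);

        if (getsockopt(connected_unix_fd, SOL_SOCKET, SO_PEERCRED,
                       &peer, &len) < 0)
                return -1;
        return peer.pid;
}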
1da177e4
LT
469static int unix_listen(struct socket *sock, int backlog)
470{
471 int err;
472 struct sock *sk = sock->sk;
473 struct unix_sock *u = unix_sk(sk);
109f6e39
EB
474 struct pid *old_pid = NULL;
475 const struct cred *old_cred = NULL;
1da177e4
LT
476
477 err = -EOPNOTSUPP;
6eba6a37
ED
478 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
479 goto out; /* Only stream/seqpacket sockets accept */
1da177e4
LT
480 err = -EINVAL;
481 if (!u->addr)
6eba6a37 482 goto out; /* No listens on an unbound socket */
1c92b4e5 483 unix_state_lock(sk);
1da177e4
LT
484 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
485 goto out_unlock;
486 if (backlog > sk->sk_max_ack_backlog)
487 wake_up_interruptible_all(&u->peer_wait);
488 sk->sk_max_ack_backlog = backlog;
489 sk->sk_state = TCP_LISTEN;
490 /* set credentials so connect can copy them */
109f6e39 491 init_peercred(sk);
1da177e4
LT
492 err = 0;
493
494out_unlock:
1c92b4e5 495 unix_state_unlock(sk);
109f6e39
EB
496 put_pid(old_pid);
497 if (old_cred)
498 put_cred(old_cred);
1da177e4
LT
499out:
500 return err;
501}
502
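/*
 * Illustrative userspace sketch (not part of af_unix.c): the usual
 * bind/listen/accept sequence that ends up in unix_bind(), unix_listen()
 * and unix_accept().  The path "/tmp/example.sock" and the helper name
 * serve_one() are made up for the example.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int serve_one(void)
{
        struct sockaddr_un addr;
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        int client;

        if (fd < 0)
                return -1;
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        strcpy(addr.sun_path, "/tmp/example.sock");
        unlink(addr.sun_path);  /* otherwise unix_bind() returns -EADDRINUSE */
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 16) < 0) {
                close(fd);
                return -1;
        }
        client = accept(fd, NULL, NULL);        /* blocks in unix_accept() */
        close(fd);
        return client;
}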
503static int unix_release(struct socket *);
504static int unix_bind(struct socket *, struct sockaddr *, int);
505static int unix_stream_connect(struct socket *, struct sockaddr *,
506 int addr_len, int flags);
507static int unix_socketpair(struct socket *, struct socket *);
508static int unix_accept(struct socket *, struct socket *, int);
509static int unix_getname(struct socket *, struct sockaddr *, int *, int);
510static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
ec0d215f
RW
511static unsigned int unix_dgram_poll(struct file *, struct socket *,
512 poll_table *);
1da177e4
LT
513static int unix_ioctl(struct socket *, unsigned int, unsigned long);
514static int unix_shutdown(struct socket *, int);
515static int unix_stream_sendmsg(struct kiocb *, struct socket *,
516 struct msghdr *, size_t);
517static int unix_stream_recvmsg(struct kiocb *, struct socket *,
518 struct msghdr *, size_t, int);
519static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
520 struct msghdr *, size_t);
521static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
522 struct msghdr *, size_t, int);
523static int unix_dgram_connect(struct socket *, struct sockaddr *,
524 int, int);
525static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
526 struct msghdr *, size_t);
a05d2ad1
EB
527static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
528 struct msghdr *, size_t, int);
1da177e4 529
90ddc4f0 530static const struct proto_ops unix_stream_ops = {
1da177e4
LT
531 .family = PF_UNIX,
532 .owner = THIS_MODULE,
533 .release = unix_release,
534 .bind = unix_bind,
535 .connect = unix_stream_connect,
536 .socketpair = unix_socketpair,
537 .accept = unix_accept,
538 .getname = unix_getname,
539 .poll = unix_poll,
540 .ioctl = unix_ioctl,
541 .listen = unix_listen,
542 .shutdown = unix_shutdown,
543 .setsockopt = sock_no_setsockopt,
544 .getsockopt = sock_no_getsockopt,
545 .sendmsg = unix_stream_sendmsg,
546 .recvmsg = unix_stream_recvmsg,
547 .mmap = sock_no_mmap,
548 .sendpage = sock_no_sendpage,
549};
550
90ddc4f0 551static const struct proto_ops unix_dgram_ops = {
1da177e4
LT
552 .family = PF_UNIX,
553 .owner = THIS_MODULE,
554 .release = unix_release,
555 .bind = unix_bind,
556 .connect = unix_dgram_connect,
557 .socketpair = unix_socketpair,
558 .accept = sock_no_accept,
559 .getname = unix_getname,
ec0d215f 560 .poll = unix_dgram_poll,
1da177e4
LT
561 .ioctl = unix_ioctl,
562 .listen = sock_no_listen,
563 .shutdown = unix_shutdown,
564 .setsockopt = sock_no_setsockopt,
565 .getsockopt = sock_no_getsockopt,
566 .sendmsg = unix_dgram_sendmsg,
567 .recvmsg = unix_dgram_recvmsg,
568 .mmap = sock_no_mmap,
569 .sendpage = sock_no_sendpage,
570};
571
90ddc4f0 572static const struct proto_ops unix_seqpacket_ops = {
1da177e4
LT
573 .family = PF_UNIX,
574 .owner = THIS_MODULE,
575 .release = unix_release,
576 .bind = unix_bind,
577 .connect = unix_stream_connect,
578 .socketpair = unix_socketpair,
579 .accept = unix_accept,
580 .getname = unix_getname,
ec0d215f 581 .poll = unix_dgram_poll,
1da177e4
LT
582 .ioctl = unix_ioctl,
583 .listen = unix_listen,
584 .shutdown = unix_shutdown,
585 .setsockopt = sock_no_setsockopt,
586 .getsockopt = sock_no_getsockopt,
587 .sendmsg = unix_seqpacket_sendmsg,
a05d2ad1 588 .recvmsg = unix_seqpacket_recvmsg,
1da177e4
LT
589 .mmap = sock_no_mmap,
590 .sendpage = sock_no_sendpage,
591};
592
593static struct proto unix_proto = {
248969ae
ED
594 .name = "UNIX",
595 .owner = THIS_MODULE,
248969ae 596 .obj_size = sizeof(struct unix_sock),
1da177e4
LT
597};
598
a09785a2
IM
599/*
600 * AF_UNIX sockets do not interact with hardware, hence they
 601 * don't trigger interrupts - so it's safe for them to have
602 * bh-unsafe locking for their sk_receive_queue.lock. Split off
603 * this special lock-class by reinitializing the spinlock key:
604 */
605static struct lock_class_key af_unix_sk_receive_queue_lock_key;
606
6eba6a37 607static struct sock *unix_create1(struct net *net, struct socket *sock)
1da177e4
LT
608{
609 struct sock *sk = NULL;
610 struct unix_sock *u;
611
518de9b3
ED
612 atomic_long_inc(&unix_nr_socks);
613 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
1da177e4
LT
614 goto out;
615
6257ff21 616 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
1da177e4
LT
617 if (!sk)
618 goto out;
619
6eba6a37 620 sock_init_data(sock, sk);
a09785a2
IM
621 lockdep_set_class(&sk->sk_receive_queue.lock,
622 &af_unix_sk_receive_queue_lock_key);
1da177e4
LT
623
624 sk->sk_write_space = unix_write_space;
a0a53c8b 625 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
1da177e4
LT
626 sk->sk_destruct = unix_sock_destructor;
627 u = unix_sk(sk);
628 u->dentry = NULL;
629 u->mnt = NULL;
fd19f329 630 spin_lock_init(&u->lock);
516e0cc5 631 atomic_long_set(&u->inflight, 0);
1fd05ba5 632 INIT_LIST_HEAD(&u->link);
57b47a53 633 mutex_init(&u->readlock); /* single task reading lock */
1da177e4
LT
634 init_waitqueue_head(&u->peer_wait);
635 unix_insert_socket(unix_sockets_unbound, sk);
636out:
284b327b 637 if (sk == NULL)
518de9b3 638 atomic_long_dec(&unix_nr_socks);
920de804
ED
639 else {
640 local_bh_disable();
a8076d8d 641 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
920de804
ED
642 local_bh_enable();
643 }
1da177e4
LT
644 return sk;
645}
646
3f378b68
EP
647static int unix_create(struct net *net, struct socket *sock, int protocol,
648 int kern)
1da177e4
LT
649{
650 if (protocol && protocol != PF_UNIX)
651 return -EPROTONOSUPPORT;
652
653 sock->state = SS_UNCONNECTED;
654
655 switch (sock->type) {
656 case SOCK_STREAM:
657 sock->ops = &unix_stream_ops;
658 break;
659 /*
660 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
661 * nothing uses it.
662 */
663 case SOCK_RAW:
e27dfcea 664 sock->type = SOCK_DGRAM;
1da177e4
LT
665 case SOCK_DGRAM:
666 sock->ops = &unix_dgram_ops;
667 break;
668 case SOCK_SEQPACKET:
669 sock->ops = &unix_seqpacket_ops;
670 break;
671 default:
672 return -ESOCKTNOSUPPORT;
673 }
674
1b8d7ae4 675 return unix_create1(net, sock) ? 0 : -ENOMEM;
1da177e4
LT
676}
677
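/*
 * Illustrative userspace sketch (not part of af_unix.c): the dispatch in
 * unix_create() silently turns SOCK_RAW into SOCK_DGRAM, which can be
 * observed with SO_TYPE.  The helper name is made up for the example.
 */
#include <sys/socket.h>
#include <unistd.h>

static int raw_becomes_dgram(void)
{
        int fd = socket(AF_UNIX, SOCK_RAW, 0);
        int type = 0;
        socklen_t len = sizeof(type);

        if (fd < 0)
                return -1;
        getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len);
        close(fd);
        return type == SOCK_DGRAM;      /* 1: the quirk described above */
}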
678static int unix_release(struct socket *sock)
679{
680 struct sock *sk = sock->sk;
681
682 if (!sk)
683 return 0;
684
685 sock->sk = NULL;
686
6eba6a37 687 return unix_release_sock(sk, 0);
1da177e4
LT
688}
689
690static int unix_autobind(struct socket *sock)
691{
692 struct sock *sk = sock->sk;
3b1e0a65 693 struct net *net = sock_net(sk);
1da177e4
LT
694 struct unix_sock *u = unix_sk(sk);
695 static u32 ordernum = 1;
6eba6a37 696 struct unix_address *addr;
1da177e4 697 int err;
8df73ff9 698 unsigned int retries = 0;
1da177e4 699
57b47a53 700 mutex_lock(&u->readlock);
1da177e4
LT
701
702 err = 0;
703 if (u->addr)
704 goto out;
705
706 err = -ENOMEM;
0da974f4 707 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
1da177e4
LT
708 if (!addr)
709 goto out;
710
1da177e4
LT
711 addr->name->sun_family = AF_UNIX;
712 atomic_set(&addr->refcnt, 1);
713
714retry:
715 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
07f0757a 716 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
1da177e4 717
fbe9cc4a 718 spin_lock(&unix_table_lock);
1da177e4
LT
719 ordernum = (ordernum+1)&0xFFFFF;
720
097e66c5 721 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
1da177e4 722 addr->hash)) {
fbe9cc4a 723 spin_unlock(&unix_table_lock);
8df73ff9
TH
724 /*
725 * __unix_find_socket_byname() may take long time if many names
726 * are already in use.
727 */
728 cond_resched();
729 /* Give up if all names seems to be in use. */
730 if (retries++ == 0xFFFFF) {
731 err = -ENOSPC;
732 kfree(addr);
733 goto out;
734 }
1da177e4
LT
735 goto retry;
736 }
737 addr->hash ^= sk->sk_type;
738
739 __unix_remove_socket(sk);
740 u->addr = addr;
741 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
fbe9cc4a 742 spin_unlock(&unix_table_lock);
1da177e4
LT
743 err = 0;
744
57b47a53 745out: mutex_unlock(&u->readlock);
1da177e4
LT
746 return err;
747}
748
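/*
 * Illustrative userspace sketch (not part of af_unix.c): passing only the
 * address family to bind() (addr_len == sizeof(short)) triggers
 * unix_autobind() above, which picks an abstract "\0xxxxx" name with five
 * hex digits.  The helper name is made up for the example.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static void autobind_example(void)
{
        struct sockaddr_un addr;
        socklen_t len = sizeof(addr);
        int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        bind(fd, (struct sockaddr *)&addr, sizeof(sa_family_t));
        getsockname(fd, (struct sockaddr *)&addr, &len);
        /* addr.sun_path[0] is '\0'; the five hex digits follow it. */
        printf("autobound to \"\\0%.5s\"\n", addr.sun_path + 1);
        close(fd);
}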
097e66c5
DL
749static struct sock *unix_find_other(struct net *net,
750 struct sockaddr_un *sunname, int len,
1da177e4
LT
751 int type, unsigned hash, int *error)
752{
753 struct sock *u;
421748ec 754 struct path path;
1da177e4 755 int err = 0;
ac7bfa62 756
1da177e4 757 if (sunname->sun_path[0]) {
421748ec
AV
758 struct inode *inode;
759 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
1da177e4
LT
760 if (err)
761 goto fail;
421748ec
AV
762 inode = path.dentry->d_inode;
763 err = inode_permission(inode, MAY_WRITE);
1da177e4
LT
764 if (err)
765 goto put_fail;
766
767 err = -ECONNREFUSED;
421748ec 768 if (!S_ISSOCK(inode->i_mode))
1da177e4 769 goto put_fail;
6616f788 770 u = unix_find_socket_byinode(inode);
1da177e4
LT
771 if (!u)
772 goto put_fail;
773
774 if (u->sk_type == type)
421748ec 775 touch_atime(path.mnt, path.dentry);
1da177e4 776
421748ec 777 path_put(&path);
1da177e4 778
e27dfcea 779 err = -EPROTOTYPE;
1da177e4
LT
780 if (u->sk_type != type) {
781 sock_put(u);
782 goto fail;
783 }
784 } else {
785 err = -ECONNREFUSED;
e27dfcea 786 u = unix_find_socket_byname(net, sunname, len, type, hash);
1da177e4
LT
787 if (u) {
788 struct dentry *dentry;
789 dentry = unix_sk(u)->dentry;
790 if (dentry)
791 touch_atime(unix_sk(u)->mnt, dentry);
792 } else
793 goto fail;
794 }
795 return u;
796
797put_fail:
421748ec 798 path_put(&path);
1da177e4 799fail:
e27dfcea 800 *error = err;
1da177e4
LT
801 return NULL;
802}
803
804
805static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
806{
807 struct sock *sk = sock->sk;
3b1e0a65 808 struct net *net = sock_net(sk);
1da177e4 809 struct unix_sock *u = unix_sk(sk);
e27dfcea 810 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
dae6ad8f 811 char *sun_path = sunaddr->sun_path;
6eba6a37 812 struct dentry *dentry = NULL;
dae6ad8f 813 struct path path;
1da177e4
LT
814 int err;
815 unsigned hash;
816 struct unix_address *addr;
817 struct hlist_head *list;
818
819 err = -EINVAL;
820 if (sunaddr->sun_family != AF_UNIX)
821 goto out;
822
e27dfcea 823 if (addr_len == sizeof(short)) {
1da177e4
LT
824 err = unix_autobind(sock);
825 goto out;
826 }
827
828 err = unix_mkname(sunaddr, addr_len, &hash);
829 if (err < 0)
830 goto out;
831 addr_len = err;
832
57b47a53 833 mutex_lock(&u->readlock);
1da177e4
LT
834
835 err = -EINVAL;
836 if (u->addr)
837 goto out_up;
838
839 err = -ENOMEM;
840 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
841 if (!addr)
842 goto out_up;
843
844 memcpy(addr->name, sunaddr, addr_len);
845 addr->len = addr_len;
846 addr->hash = hash ^ sk->sk_type;
847 atomic_set(&addr->refcnt, 1);
848
dae6ad8f 849 if (sun_path[0]) {
1da177e4
LT
850 unsigned int mode;
851 err = 0;
852 /*
853 * Get the parent directory, calculate the hash for last
854 * component.
855 */
dae6ad8f 856 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
1da177e4
LT
857 err = PTR_ERR(dentry);
858 if (IS_ERR(dentry))
dae6ad8f 859 goto out_mknod_parent;
f81a0bff 860
1da177e4
LT
861 /*
862 * All right, let's create it.
863 */
864 mode = S_IFSOCK |
ce3b0f8d 865 (SOCK_INODE(sock)->i_mode & ~current_umask());
dae6ad8f 866 err = mnt_want_write(path.mnt);
463c3197
DH
867 if (err)
868 goto out_mknod_dput;
dae6ad8f 869 err = security_path_mknod(&path, dentry, mode, 0);
be6d3e56
KT
870 if (err)
871 goto out_mknod_drop_write;
dae6ad8f 872 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
be6d3e56 873out_mknod_drop_write:
dae6ad8f 874 mnt_drop_write(path.mnt);
1da177e4
LT
875 if (err)
876 goto out_mknod_dput;
dae6ad8f
AV
877 mutex_unlock(&path.dentry->d_inode->i_mutex);
878 dput(path.dentry);
879 path.dentry = dentry;
1da177e4
LT
880
881 addr->hash = UNIX_HASH_SIZE;
882 }
883
fbe9cc4a 884 spin_lock(&unix_table_lock);
1da177e4 885
dae6ad8f 886 if (!sun_path[0]) {
1da177e4 887 err = -EADDRINUSE;
097e66c5 888 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1da177e4
LT
889 sk->sk_type, hash)) {
890 unix_release_addr(addr);
891 goto out_unlock;
892 }
893
894 list = &unix_socket_table[addr->hash];
895 } else {
896 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
dae6ad8f
AV
897 u->dentry = path.dentry;
898 u->mnt = path.mnt;
1da177e4
LT
899 }
900
901 err = 0;
902 __unix_remove_socket(sk);
903 u->addr = addr;
904 __unix_insert_socket(list, sk);
905
906out_unlock:
fbe9cc4a 907 spin_unlock(&unix_table_lock);
1da177e4 908out_up:
57b47a53 909 mutex_unlock(&u->readlock);
1da177e4
LT
910out:
911 return err;
912
913out_mknod_dput:
914 dput(dentry);
dae6ad8f
AV
915 mutex_unlock(&path.dentry->d_inode->i_mutex);
916 path_put(&path);
1da177e4 917out_mknod_parent:
e27dfcea
JK
918 if (err == -EEXIST)
919 err = -EADDRINUSE;
1da177e4
LT
920 unix_release_addr(addr);
921 goto out_up;
922}
923
278a3de5
DM
924static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
925{
926 if (unlikely(sk1 == sk2) || !sk2) {
927 unix_state_lock(sk1);
928 return;
929 }
930 if (sk1 < sk2) {
931 unix_state_lock(sk1);
932 unix_state_lock_nested(sk2);
933 } else {
934 unix_state_lock(sk2);
935 unix_state_lock_nested(sk1);
936 }
937}
938
939static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
940{
941 if (unlikely(sk1 == sk2) || !sk2) {
942 unix_state_unlock(sk1);
943 return;
944 }
945 unix_state_unlock(sk1);
946 unix_state_unlock(sk2);
947}
948
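/*
 * Illustrative sketch (not part of af_unix.c): the same "lock the lower
 * address first" rule, shown with pthread mutexes.  Callers that always
 * take a pair of locks in one consistent order cannot deadlock, which is
 * what unix_state_double_lock() relies on.
 */
#include <pthread.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b || b == NULL) {
                pthread_mutex_lock(a);
                return;
        }
        if (a < b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}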
1da177e4
LT
949static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
950 int alen, int flags)
951{
952 struct sock *sk = sock->sk;
3b1e0a65 953 struct net *net = sock_net(sk);
e27dfcea 954 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1da177e4
LT
955 struct sock *other;
956 unsigned hash;
957 int err;
958
959 if (addr->sa_family != AF_UNSPEC) {
960 err = unix_mkname(sunaddr, alen, &hash);
961 if (err < 0)
962 goto out;
963 alen = err;
964
965 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
966 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
967 goto out;
968
278a3de5 969restart:
e27dfcea 970 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1da177e4
LT
971 if (!other)
972 goto out;
973
278a3de5
DM
974 unix_state_double_lock(sk, other);
975
976 /* Apparently VFS overslept socket death. Retry. */
977 if (sock_flag(other, SOCK_DEAD)) {
978 unix_state_double_unlock(sk, other);
979 sock_put(other);
980 goto restart;
981 }
1da177e4
LT
982
983 err = -EPERM;
984 if (!unix_may_send(sk, other))
985 goto out_unlock;
986
987 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
988 if (err)
989 goto out_unlock;
990
991 } else {
992 /*
993 * 1003.1g breaking connected state with AF_UNSPEC
994 */
995 other = NULL;
278a3de5 996 unix_state_double_lock(sk, other);
1da177e4
LT
997 }
998
999 /*
1000 * If it was connected, reconnect.
1001 */
1002 if (unix_peer(sk)) {
1003 struct sock *old_peer = unix_peer(sk);
e27dfcea 1004 unix_peer(sk) = other;
278a3de5 1005 unix_state_double_unlock(sk, other);
1da177e4
LT
1006
1007 if (other != old_peer)
1008 unix_dgram_disconnected(sk, old_peer);
1009 sock_put(old_peer);
1010 } else {
e27dfcea 1011 unix_peer(sk) = other;
278a3de5 1012 unix_state_double_unlock(sk, other);
1da177e4 1013 }
ac7bfa62 1014 return 0;
1da177e4
LT
1015
1016out_unlock:
278a3de5 1017 unix_state_double_unlock(sk, other);
1da177e4
LT
1018 sock_put(other);
1019out:
1020 return err;
1021}
1022
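/*
 * Illustrative userspace sketch (not part of af_unix.c): connect() with
 * AF_UNSPEC takes the "breaking connected state" branch below and simply
 * drops the datagram socket's current peer.  The helper name is made up.
 */
#include <string.h>
#include <sys/socket.h>

static int dgram_disconnect(int fd)
{
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        return connect(fd, &sa, sizeof(sa));
}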
1023static long unix_wait_for_peer(struct sock *other, long timeo)
1024{
1025 struct unix_sock *u = unix_sk(other);
1026 int sched;
1027 DEFINE_WAIT(wait);
1028
1029 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1030
1031 sched = !sock_flag(other, SOCK_DEAD) &&
1032 !(other->sk_shutdown & RCV_SHUTDOWN) &&
3c73419c 1033 unix_recvq_full(other);
1da177e4 1034
1c92b4e5 1035 unix_state_unlock(other);
1da177e4
LT
1036
1037 if (sched)
1038 timeo = schedule_timeout(timeo);
1039
1040 finish_wait(&u->peer_wait, &wait);
1041 return timeo;
1042}
1043
1044static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1045 int addr_len, int flags)
1046{
e27dfcea 1047 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1da177e4 1048 struct sock *sk = sock->sk;
3b1e0a65 1049 struct net *net = sock_net(sk);
1da177e4
LT
1050 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1051 struct sock *newsk = NULL;
1052 struct sock *other = NULL;
1053 struct sk_buff *skb = NULL;
1054 unsigned hash;
1055 int st;
1056 int err;
1057 long timeo;
1058
1059 err = unix_mkname(sunaddr, addr_len, &hash);
1060 if (err < 0)
1061 goto out;
1062 addr_len = err;
1063
f64f9e71
JP
1064 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1065 (err = unix_autobind(sock)) != 0)
1da177e4
LT
1066 goto out;
1067
1068 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1069
1070 /* First of all allocate resources.
1071 If we will make it after state is locked,
1072 we will have to recheck all again in any case.
1073 */
1074
1075 err = -ENOMEM;
1076
1077 /* create new sock for complete connection */
3b1e0a65 1078 newsk = unix_create1(sock_net(sk), NULL);
1da177e4
LT
1079 if (newsk == NULL)
1080 goto out;
1081
1082 /* Allocate skb for sending to listening sock */
1083 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1084 if (skb == NULL)
1085 goto out;
1086
1087restart:
1088 /* Find listening sock. */
097e66c5 1089 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1da177e4
LT
1090 if (!other)
1091 goto out;
1092
1093 /* Latch state of peer */
1c92b4e5 1094 unix_state_lock(other);
1da177e4
LT
1095
1096 /* Apparently VFS overslept socket death. Retry. */
1097 if (sock_flag(other, SOCK_DEAD)) {
1c92b4e5 1098 unix_state_unlock(other);
1da177e4
LT
1099 sock_put(other);
1100 goto restart;
1101 }
1102
1103 err = -ECONNREFUSED;
1104 if (other->sk_state != TCP_LISTEN)
1105 goto out_unlock;
77238f2b
TS
1106 if (other->sk_shutdown & RCV_SHUTDOWN)
1107 goto out_unlock;
1da177e4 1108
3c73419c 1109 if (unix_recvq_full(other)) {
1da177e4
LT
1110 err = -EAGAIN;
1111 if (!timeo)
1112 goto out_unlock;
1113
1114 timeo = unix_wait_for_peer(other, timeo);
1115
1116 err = sock_intr_errno(timeo);
1117 if (signal_pending(current))
1118 goto out;
1119 sock_put(other);
1120 goto restart;
ac7bfa62 1121 }
1da177e4
LT
1122
1123 /* Latch our state.
1124
 1125 It is a tricky place. We need to grab our state lock and cannot
1da177e4
LT
1126 drop lock on peer. It is dangerous because deadlock is
1127 possible. Connect to self case and simultaneous
1128 attempt to connect are eliminated by checking socket
1129 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1130 check this before attempt to grab lock.
1131
1132 Well, and we have to recheck the state after socket locked.
1133 */
1134 st = sk->sk_state;
1135
1136 switch (st) {
1137 case TCP_CLOSE:
1138 /* This is ok... continue with connect */
1139 break;
1140 case TCP_ESTABLISHED:
1141 /* Socket is already connected */
1142 err = -EISCONN;
1143 goto out_unlock;
1144 default:
1145 err = -EINVAL;
1146 goto out_unlock;
1147 }
1148
1c92b4e5 1149 unix_state_lock_nested(sk);
1da177e4
LT
1150
1151 if (sk->sk_state != st) {
1c92b4e5
DM
1152 unix_state_unlock(sk);
1153 unix_state_unlock(other);
1da177e4
LT
1154 sock_put(other);
1155 goto restart;
1156 }
1157
3610cda5 1158 err = security_unix_stream_connect(sk, other, newsk);
1da177e4 1159 if (err) {
1c92b4e5 1160 unix_state_unlock(sk);
1da177e4
LT
1161 goto out_unlock;
1162 }
1163
 1164 /* The way is open! Quickly set all the necessary fields... */
1165
1166 sock_hold(sk);
1167 unix_peer(newsk) = sk;
1168 newsk->sk_state = TCP_ESTABLISHED;
1169 newsk->sk_type = sk->sk_type;
109f6e39 1170 init_peercred(newsk);
1da177e4 1171 newu = unix_sk(newsk);
eaefd110 1172 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1da177e4
LT
1173 otheru = unix_sk(other);
1174
1175 /* copy address information from listening to new sock*/
1176 if (otheru->addr) {
1177 atomic_inc(&otheru->addr->refcnt);
1178 newu->addr = otheru->addr;
1179 }
1180 if (otheru->dentry) {
1181 newu->dentry = dget(otheru->dentry);
1182 newu->mnt = mntget(otheru->mnt);
1183 }
1184
1185 /* Set credentials */
109f6e39 1186 copy_peercred(sk, other);
1da177e4 1187
1da177e4
LT
1188 sock->state = SS_CONNECTED;
1189 sk->sk_state = TCP_ESTABLISHED;
830a1e5c
BL
1190 sock_hold(newsk);
1191
1192 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1193 unix_peer(sk) = newsk;
1da177e4 1194
1c92b4e5 1195 unix_state_unlock(sk);
1da177e4
LT
1196
 1197 /* take ten and send info to listening sock */
1198 spin_lock(&other->sk_receive_queue.lock);
1199 __skb_queue_tail(&other->sk_receive_queue, skb);
1da177e4 1200 spin_unlock(&other->sk_receive_queue.lock);
1c92b4e5 1201 unix_state_unlock(other);
1da177e4
LT
1202 other->sk_data_ready(other, 0);
1203 sock_put(other);
1204 return 0;
1205
1206out_unlock:
1207 if (other)
1c92b4e5 1208 unix_state_unlock(other);
1da177e4
LT
1209
1210out:
40d44446 1211 kfree_skb(skb);
1da177e4
LT
1212 if (newsk)
1213 unix_release_sock(newsk, 0);
1214 if (other)
1215 sock_put(other);
1216 return err;
1217}
1218
1219static int unix_socketpair(struct socket *socka, struct socket *sockb)
1220{
e27dfcea 1221 struct sock *ska = socka->sk, *skb = sockb->sk;
1da177e4
LT
1222
1223 /* Join our sockets back to back */
1224 sock_hold(ska);
1225 sock_hold(skb);
e27dfcea
JK
1226 unix_peer(ska) = skb;
1227 unix_peer(skb) = ska;
109f6e39
EB
1228 init_peercred(ska);
1229 init_peercred(skb);
1da177e4
LT
1230
1231 if (ska->sk_type != SOCK_DGRAM) {
1232 ska->sk_state = TCP_ESTABLISHED;
1233 skb->sk_state = TCP_ESTABLISHED;
1234 socka->state = SS_CONNECTED;
1235 sockb->state = SS_CONNECTED;
1236 }
1237 return 0;
1238}
1239
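/*
 * Illustrative userspace sketch (not part of af_unix.c): socketpair() is
 * the entry point for unix_socketpair() above; both ends come back already
 * connected, with peer credentials set by init_peercred().
 */
#include <sys/socket.h>

static int make_pair(int sv[2])
{
        /* No bind()/connect() needed; sv[0] and sv[1] are peers. */
        return socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
}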
1240static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1241{
1242 struct sock *sk = sock->sk;
1243 struct sock *tsk;
1244 struct sk_buff *skb;
1245 int err;
1246
1247 err = -EOPNOTSUPP;
6eba6a37 1248 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1da177e4
LT
1249 goto out;
1250
1251 err = -EINVAL;
1252 if (sk->sk_state != TCP_LISTEN)
1253 goto out;
1254
1255 /* If socket state is TCP_LISTEN it cannot change (for now...),
1256 * so that no locks are necessary.
1257 */
1258
1259 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1260 if (!skb) {
1261 /* This means receive shutdown. */
1262 if (err == 0)
1263 err = -EINVAL;
1264 goto out;
1265 }
1266
1267 tsk = skb->sk;
1268 skb_free_datagram(sk, skb);
1269 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1270
1271 /* attach accepted sock to socket */
1c92b4e5 1272 unix_state_lock(tsk);
1da177e4
LT
1273 newsock->state = SS_CONNECTED;
1274 sock_graft(tsk, newsock);
1c92b4e5 1275 unix_state_unlock(tsk);
1da177e4
LT
1276 return 0;
1277
1278out:
1279 return err;
1280}
1281
1282
1283static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1284{
1285 struct sock *sk = sock->sk;
1286 struct unix_sock *u;
13cfa97b 1287 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1da177e4
LT
1288 int err = 0;
1289
1290 if (peer) {
1291 sk = unix_peer_get(sk);
1292
1293 err = -ENOTCONN;
1294 if (!sk)
1295 goto out;
1296 err = 0;
1297 } else {
1298 sock_hold(sk);
1299 }
1300
1301 u = unix_sk(sk);
1c92b4e5 1302 unix_state_lock(sk);
1da177e4
LT
1303 if (!u->addr) {
1304 sunaddr->sun_family = AF_UNIX;
1305 sunaddr->sun_path[0] = 0;
1306 *uaddr_len = sizeof(short);
1307 } else {
1308 struct unix_address *addr = u->addr;
1309
1310 *uaddr_len = addr->len;
1311 memcpy(sunaddr, addr->name, *uaddr_len);
1312 }
1c92b4e5 1313 unix_state_unlock(sk);
1da177e4
LT
1314 sock_put(sk);
1315out:
1316 return err;
1317}
1318
1319static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1320{
1321 int i;
1322
1323 scm->fp = UNIXCB(skb).fp;
1da177e4
LT
1324 UNIXCB(skb).fp = NULL;
1325
6eba6a37 1326 for (i = scm->fp->count-1; i >= 0; i--)
1da177e4
LT
1327 unix_notinflight(scm->fp->fp[i]);
1328}
1329
7361c36c 1330static void unix_destruct_scm(struct sk_buff *skb)
1da177e4
LT
1331{
1332 struct scm_cookie scm;
1333 memset(&scm, 0, sizeof(scm));
7361c36c
EB
1334 scm.pid = UNIXCB(skb).pid;
1335 scm.cred = UNIXCB(skb).cred;
1336 if (UNIXCB(skb).fp)
1337 unix_detach_fds(&scm, skb);
1da177e4
LT
1338
1339 /* Alas, it calls VFS */
1340 /* So fscking what? fput() had been SMP-safe since the last Summer */
1341 scm_destroy(&scm);
1342 sock_wfree(skb);
1343}
1344
25888e30
ED
1345#define MAX_RECURSION_LEVEL 4
1346
6209344f 1347static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1da177e4
LT
1348{
1349 int i;
25888e30
ED
1350 unsigned char max_level = 0;
1351 int unix_sock_count = 0;
1352
1353 for (i = scm->fp->count - 1; i >= 0; i--) {
1354 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1355
1356 if (sk) {
1357 unix_sock_count++;
1358 max_level = max(max_level,
1359 unix_sk(sk)->recursion_level);
1360 }
1361 }
1362 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1363 return -ETOOMANYREFS;
6209344f
MS
1364
1365 /*
1366 * Need to duplicate file references for the sake of garbage
1367 * collection. Otherwise a socket in the fps might become a
1368 * candidate for GC while the skb is not yet queued.
1369 */
1370 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1371 if (!UNIXCB(skb).fp)
1372 return -ENOMEM;
1373
25888e30
ED
1374 if (unix_sock_count) {
1375 for (i = scm->fp->count - 1; i >= 0; i--)
1376 unix_inflight(scm->fp->fp[i]);
1377 }
1378 return max_level;
1da177e4
LT
1379}
1380
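/*
 * Illustrative userspace sketch (not part of af_unix.c): a descriptor sent
 * with SCM_RIGHTS is what unix_attach_fds() pins on the send side and
 * unix_detach_fds() releases on receive.  The helper name is made up.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(int sock, int fd_to_pass)
{
        union {
                struct cmsghdr align;
                char buf[CMSG_SPACE(sizeof(int))];
        } control;
        char byte = 0;
        struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
        struct msghdr msg;
        struct cmsghdr *cmsg;

        memset(&control, 0, sizeof(control));
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = control.buf;
        msg.msg_controllen = sizeof(control.buf);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

        /* One data byte must accompany the control message. */
        return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}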
f78a5fda 1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
7361c36c
EB
1382{
1383 int err = 0;
16e57262 1384
f78a5fda 1385 UNIXCB(skb).pid = get_pid(scm->pid);
16e57262
ED
1386 if (scm->cred)
1387 UNIXCB(skb).cred = get_cred(scm->cred);
7361c36c
EB
1388 UNIXCB(skb).fp = NULL;
1389 if (scm->fp && send_fds)
1390 err = unix_attach_fds(scm, skb);
1391
1392 skb->destructor = unix_destruct_scm;
1393 return err;
1394}
1395
16e57262
ED
1396/*
 1397 * Some apps rely on write() giving SCM_CREDENTIALS.
1398 * We include credentials if source or destination socket
1399 * asserted SOCK_PASSCRED.
1400 */
1401static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1402 const struct sock *other)
1403{
1404 if (UNIXCB(skb).cred)
1405 return;
1406 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1407 !other->sk_socket ||
1408 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1409 UNIXCB(skb).pid = get_pid(task_tgid(current));
1410 UNIXCB(skb).cred = get_current_cred();
1411 }
1412}
1413
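/*
 * Illustrative userspace sketch (not part of af_unix.c): after this change
 * a receiver only sees SCM_CREDENTIALS if SOCK_PASSCRED was asserted on one
 * of the sockets, so it must opt in with SO_PASSCRED.  The helper name is
 * made up for the example.
 */
#define _GNU_SOURCE             /* for struct ucred */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static int recv_with_creds(int fd, char *buf, size_t len, struct ucred *out)
{
        union {
                struct cmsghdr align;
                char data[CMSG_SPACE(sizeof(struct ucred))];
        } control;
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg;
        struct cmsghdr *cmsg;
        int on = 1;
        ssize_t n;

        /* Opt in: this sets SOCK_PASSCRED, which maybe_add_creds() checks. */
        if (setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on)) < 0)
                return -1;

        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = control.data;
        msg.msg_controllen = sizeof(control.data);

        n = recvmsg(fd, &msg, 0);
        if (n < 0)
                return -1;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SCM_CREDENTIALS) {
                        memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
                        break;
                }
        }
        return (int)n;
}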
1da177e4
LT
1414/*
1415 * Send AF_UNIX data.
1416 */
1417
1418static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1419 struct msghdr *msg, size_t len)
1420{
1421 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1422 struct sock *sk = sock->sk;
3b1e0a65 1423 struct net *net = sock_net(sk);
1da177e4 1424 struct unix_sock *u = unix_sk(sk);
e27dfcea 1425 struct sockaddr_un *sunaddr = msg->msg_name;
1da177e4
LT
1426 struct sock *other = NULL;
1427 int namelen = 0; /* fake GCC */
1428 int err;
1429 unsigned hash;
f78a5fda 1430 struct sk_buff *skb;
1da177e4
LT
1431 long timeo;
1432 struct scm_cookie tmp_scm;
25888e30 1433 int max_level;
1da177e4
LT
1434
1435 if (NULL == siocb->scm)
1436 siocb->scm = &tmp_scm;
5f23b734 1437 wait_for_unix_gc();
1da177e4
LT
1438 err = scm_send(sock, msg, siocb->scm);
1439 if (err < 0)
1440 return err;
1441
1442 err = -EOPNOTSUPP;
1443 if (msg->msg_flags&MSG_OOB)
1444 goto out;
1445
1446 if (msg->msg_namelen) {
1447 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1448 if (err < 0)
1449 goto out;
1450 namelen = err;
1451 } else {
1452 sunaddr = NULL;
1453 err = -ENOTCONN;
1454 other = unix_peer_get(sk);
1455 if (!other)
1456 goto out;
1457 }
1458
f64f9e71
JP
1459 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1460 && (err = unix_autobind(sock)) != 0)
1da177e4
LT
1461 goto out;
1462
1463 err = -EMSGSIZE;
1464 if (len > sk->sk_sndbuf - 32)
1465 goto out;
1466
1467 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
e27dfcea 1468 if (skb == NULL)
1da177e4
LT
1469 goto out;
1470
f78a5fda 1471 err = unix_scm_to_skb(siocb->scm, skb, true);
25888e30 1472 if (err < 0)
7361c36c 1473 goto out_free;
25888e30 1474 max_level = err + 1;
dc49c1f9 1475 unix_get_secdata(siocb->scm, skb);
877ce7c1 1476
badff6d0 1477 skb_reset_transport_header(skb);
6eba6a37 1478 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1da177e4
LT
1479 if (err)
1480 goto out_free;
1481
1482 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1483
1484restart:
1485 if (!other) {
1486 err = -ECONNRESET;
1487 if (sunaddr == NULL)
1488 goto out_free;
1489
097e66c5 1490 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1da177e4 1491 hash, &err);
e27dfcea 1492 if (other == NULL)
1da177e4
LT
1493 goto out_free;
1494 }
1495
d6ae3bae
AC
1496 if (sk_filter(other, skb) < 0) {
1497 /* Toss the packet but do not return any error to the sender */
1498 err = len;
1499 goto out_free;
1500 }
1501
1c92b4e5 1502 unix_state_lock(other);
1da177e4
LT
1503 err = -EPERM;
1504 if (!unix_may_send(sk, other))
1505 goto out_unlock;
1506
1507 if (sock_flag(other, SOCK_DEAD)) {
1508 /*
1509 * Check with 1003.1g - what should
1510 * datagram error
1511 */
1c92b4e5 1512 unix_state_unlock(other);
1da177e4
LT
1513 sock_put(other);
1514
1515 err = 0;
1c92b4e5 1516 unix_state_lock(sk);
1da177e4 1517 if (unix_peer(sk) == other) {
e27dfcea 1518 unix_peer(sk) = NULL;
1c92b4e5 1519 unix_state_unlock(sk);
1da177e4
LT
1520
1521 unix_dgram_disconnected(sk, other);
1522 sock_put(other);
1523 err = -ECONNREFUSED;
1524 } else {
1c92b4e5 1525 unix_state_unlock(sk);
1da177e4
LT
1526 }
1527
1528 other = NULL;
1529 if (err)
1530 goto out_free;
1531 goto restart;
1532 }
1533
1534 err = -EPIPE;
1535 if (other->sk_shutdown & RCV_SHUTDOWN)
1536 goto out_unlock;
1537
1538 if (sk->sk_type != SOCK_SEQPACKET) {
1539 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1540 if (err)
1541 goto out_unlock;
1542 }
1543
3c73419c 1544 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1da177e4
LT
1545 if (!timeo) {
1546 err = -EAGAIN;
1547 goto out_unlock;
1548 }
1549
1550 timeo = unix_wait_for_peer(other, timeo);
1551
1552 err = sock_intr_errno(timeo);
1553 if (signal_pending(current))
1554 goto out_free;
1555
1556 goto restart;
1557 }
1558
3f66116e
AC
1559 if (sock_flag(other, SOCK_RCVTSTAMP))
1560 __net_timestamp(skb);
16e57262 1561 maybe_add_creds(skb, sock, other);
1da177e4 1562 skb_queue_tail(&other->sk_receive_queue, skb);
25888e30
ED
1563 if (max_level > unix_sk(other)->recursion_level)
1564 unix_sk(other)->recursion_level = max_level;
1c92b4e5 1565 unix_state_unlock(other);
1da177e4
LT
1566 other->sk_data_ready(other, len);
1567 sock_put(other);
f78a5fda 1568 scm_destroy(siocb->scm);
1da177e4
LT
1569 return len;
1570
1571out_unlock:
1c92b4e5 1572 unix_state_unlock(other);
1da177e4
LT
1573out_free:
1574 kfree_skb(skb);
1575out:
1576 if (other)
1577 sock_put(other);
f78a5fda 1578 scm_destroy(siocb->scm);
1da177e4
LT
1579 return err;
1580}
1581
ac7bfa62 1582
1da177e4
LT
1583static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1584 struct msghdr *msg, size_t len)
1585{
1586 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1587 struct sock *sk = sock->sk;
1588 struct sock *other = NULL;
6eba6a37 1589 int err, size;
f78a5fda 1590 struct sk_buff *skb;
e27dfcea 1591 int sent = 0;
1da177e4 1592 struct scm_cookie tmp_scm;
8ba69ba6 1593 bool fds_sent = false;
25888e30 1594 int max_level;
1da177e4
LT
1595
1596 if (NULL == siocb->scm)
1597 siocb->scm = &tmp_scm;
5f23b734 1598 wait_for_unix_gc();
1da177e4
LT
1599 err = scm_send(sock, msg, siocb->scm);
1600 if (err < 0)
1601 return err;
1602
1603 err = -EOPNOTSUPP;
1604 if (msg->msg_flags&MSG_OOB)
1605 goto out_err;
1606
1607 if (msg->msg_namelen) {
1608 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1609 goto out_err;
1610 } else {
1da177e4 1611 err = -ENOTCONN;
830a1e5c 1612 other = unix_peer(sk);
1da177e4
LT
1613 if (!other)
1614 goto out_err;
1615 }
1616
1617 if (sk->sk_shutdown & SEND_SHUTDOWN)
1618 goto pipe_err;
1619
6eba6a37 1620 while (sent < len) {
1da177e4 1621 /*
e9df7d7f
BL
1622 * Optimisation for the fact that under 0.01% of X
1623 * messages typically need breaking up.
1da177e4
LT
1624 */
1625
e9df7d7f 1626 size = len-sent;
1da177e4
LT
1627
1628 /* Keep two messages in the pipe so it schedules better */
e9df7d7f
BL
1629 if (size > ((sk->sk_sndbuf >> 1) - 64))
1630 size = (sk->sk_sndbuf >> 1) - 64;
1da177e4
LT
1631
1632 if (size > SKB_MAX_ALLOC)
1633 size = SKB_MAX_ALLOC;
ac7bfa62 1634
1da177e4
LT
1635 /*
1636 * Grab a buffer
1637 */
ac7bfa62 1638
6eba6a37
ED
1639 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1640 &err);
1da177e4 1641
e27dfcea 1642 if (skb == NULL)
1da177e4
LT
1643 goto out_err;
1644
1645 /*
1646 * If you pass two values to the sock_alloc_send_skb
1647 * it tries to grab the large buffer with GFP_NOFS
1648 * (which can fail easily), and if it fails grab the
1649 * fallback size buffer which is under a page and will
1650 * succeed. [Alan]
1651 */
1652 size = min_t(int, size, skb_tailroom(skb));
1653
7361c36c 1654
f78a5fda
DM
1655 /* Only send the fds in the first buffer */
1656 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
25888e30 1657 if (err < 0) {
7361c36c 1658 kfree_skb(skb);
f78a5fda 1659 goto out_err;
6209344f 1660 }
25888e30 1661 max_level = err + 1;
7361c36c 1662 fds_sent = true;
1da177e4 1663
6eba6a37
ED
1664 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1665 if (err) {
1da177e4 1666 kfree_skb(skb);
f78a5fda 1667 goto out_err;
1da177e4
LT
1668 }
1669
1c92b4e5 1670 unix_state_lock(other);
1da177e4
LT
1671
1672 if (sock_flag(other, SOCK_DEAD) ||
1673 (other->sk_shutdown & RCV_SHUTDOWN))
1674 goto pipe_err_free;
1675
16e57262 1676 maybe_add_creds(skb, sock, other);
1da177e4 1677 skb_queue_tail(&other->sk_receive_queue, skb);
25888e30
ED
1678 if (max_level > unix_sk(other)->recursion_level)
1679 unix_sk(other)->recursion_level = max_level;
1c92b4e5 1680 unix_state_unlock(other);
1da177e4 1681 other->sk_data_ready(other, size);
e27dfcea 1682 sent += size;
1da177e4 1683 }
1da177e4 1684
f78a5fda 1685 scm_destroy(siocb->scm);
1da177e4
LT
1686 siocb->scm = NULL;
1687
1688 return sent;
1689
1690pipe_err_free:
1c92b4e5 1691 unix_state_unlock(other);
1da177e4
LT
1692 kfree_skb(skb);
1693pipe_err:
6eba6a37
ED
1694 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1695 send_sig(SIGPIPE, current, 0);
1da177e4
LT
1696 err = -EPIPE;
1697out_err:
f78a5fda 1698 scm_destroy(siocb->scm);
1da177e4
LT
1699 siocb->scm = NULL;
1700 return sent ? : err;
1701}
1702
1703static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1704 struct msghdr *msg, size_t len)
1705{
1706 int err;
1707 struct sock *sk = sock->sk;
ac7bfa62 1708
1da177e4
LT
1709 err = sock_error(sk);
1710 if (err)
1711 return err;
1712
1713 if (sk->sk_state != TCP_ESTABLISHED)
1714 return -ENOTCONN;
1715
1716 if (msg->msg_namelen)
1717 msg->msg_namelen = 0;
1718
1719 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1720}
ac7bfa62 1721
a05d2ad1
EB
1722static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1723 struct msghdr *msg, size_t size,
1724 int flags)
1725{
1726 struct sock *sk = sock->sk;
1727
1728 if (sk->sk_state != TCP_ESTABLISHED)
1729 return -ENOTCONN;
1730
1731 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1732}
1733
1da177e4
LT
1734static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1735{
1736 struct unix_sock *u = unix_sk(sk);
1737
1738 msg->msg_namelen = 0;
1739 if (u->addr) {
1740 msg->msg_namelen = u->addr->len;
1741 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1742 }
1743}
1744
1745static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1746 struct msghdr *msg, size_t size,
1747 int flags)
1748{
1749 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1750 struct scm_cookie tmp_scm;
1751 struct sock *sk = sock->sk;
1752 struct unix_sock *u = unix_sk(sk);
1753 int noblock = flags & MSG_DONTWAIT;
1754 struct sk_buff *skb;
1755 int err;
1756
1757 err = -EOPNOTSUPP;
1758 if (flags&MSG_OOB)
1759 goto out;
1760
1761 msg->msg_namelen = 0;
1762
b3ca9b02
RW
1763 err = mutex_lock_interruptible(&u->readlock);
1764 if (err) {
1765 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1766 goto out;
1767 }
1da177e4
LT
1768
1769 skb = skb_recv_datagram(sk, flags, noblock, &err);
0a112258
FZ
1770 if (!skb) {
1771 unix_state_lock(sk);
1772 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1773 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1774 (sk->sk_shutdown & RCV_SHUTDOWN))
1775 err = 0;
1776 unix_state_unlock(sk);
1da177e4 1777 goto out_unlock;
0a112258 1778 }
1da177e4 1779
67426b75
ED
1780 wake_up_interruptible_sync_poll(&u->peer_wait,
1781 POLLOUT | POLLWRNORM | POLLWRBAND);
1da177e4
LT
1782
1783 if (msg->msg_name)
1784 unix_copy_addr(msg, skb->sk);
1785
1786 if (size > skb->len)
1787 size = skb->len;
1788 else if (size < skb->len)
1789 msg->msg_flags |= MSG_TRUNC;
1790
1791 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1792 if (err)
1793 goto out_free;
1794
3f66116e
AC
1795 if (sock_flag(sk, SOCK_RCVTSTAMP))
1796 __sock_recv_timestamp(msg, sk, skb);
1797
1da177e4
LT
1798 if (!siocb->scm) {
1799 siocb->scm = &tmp_scm;
1800 memset(&tmp_scm, 0, sizeof(tmp_scm));
1801 }
f78a5fda 1802 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
877ce7c1 1803 unix_set_secdata(siocb->scm, skb);
1da177e4 1804
6eba6a37 1805 if (!(flags & MSG_PEEK)) {
1da177e4
LT
1806 if (UNIXCB(skb).fp)
1807 unix_detach_fds(siocb->scm, skb);
6eba6a37 1808 } else {
1da177e4
LT
1809 /* It is questionable: on PEEK we could:
1810 - do not return fds - good, but too simple 8)
1811 - return fds, and do not return them on read (old strategy,
1812 apparently wrong)
1813 - clone fds (I chose it for now, it is the most universal
1814 solution)
ac7bfa62
YH
1815
1816 POSIX 1003.1g does not actually define this clearly
1817 at all. POSIX 1003.1g doesn't define a lot of things
1818 clearly however!
1819
1da177e4
LT
1820 */
1821 if (UNIXCB(skb).fp)
1822 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1823 }
1824 err = size;
1825
1826 scm_recv(sock, msg, siocb->scm, flags);
1827
1828out_free:
6eba6a37 1829 skb_free_datagram(sk, skb);
1da177e4 1830out_unlock:
57b47a53 1831 mutex_unlock(&u->readlock);
1da177e4
LT
1832out:
1833 return err;
1834}
1835
1836/*
 1837 * Sleep until data has arrived. But check for races.
1838 */
ac7bfa62 1839
6eba6a37 1840static long unix_stream_data_wait(struct sock *sk, long timeo)
1da177e4
LT
1841{
1842 DEFINE_WAIT(wait);
1843
1c92b4e5 1844 unix_state_lock(sk);
1da177e4
LT
1845
1846 for (;;) {
aa395145 1847 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1da177e4 1848
b03efcfb 1849 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1da177e4
LT
1850 sk->sk_err ||
1851 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1852 signal_pending(current) ||
1853 !timeo)
1854 break;
1855
1856 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1c92b4e5 1857 unix_state_unlock(sk);
1da177e4 1858 timeo = schedule_timeout(timeo);
1c92b4e5 1859 unix_state_lock(sk);
1da177e4
LT
1860 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1861 }
1862
aa395145 1863 finish_wait(sk_sleep(sk), &wait);
1c92b4e5 1864 unix_state_unlock(sk);
1da177e4
LT
1865 return timeo;
1866}
1867
1868
1869
1870static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1871 struct msghdr *msg, size_t size,
1872 int flags)
1873{
1874 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1875 struct scm_cookie tmp_scm;
1876 struct sock *sk = sock->sk;
1877 struct unix_sock *u = unix_sk(sk);
e27dfcea 1878 struct sockaddr_un *sunaddr = msg->msg_name;
1da177e4
LT
1879 int copied = 0;
1880 int check_creds = 0;
1881 int target;
1882 int err = 0;
1883 long timeo;
1884
1885 err = -EINVAL;
1886 if (sk->sk_state != TCP_ESTABLISHED)
1887 goto out;
1888
1889 err = -EOPNOTSUPP;
1890 if (flags&MSG_OOB)
1891 goto out;
1892
1893 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1894 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1895
1896 msg->msg_namelen = 0;
1897
1898 /* Lock the socket to prevent queue disordering
 1899 * while we sleep in memcpy_tomsg
1900 */
1901
1902 if (!siocb->scm) {
1903 siocb->scm = &tmp_scm;
1904 memset(&tmp_scm, 0, sizeof(tmp_scm));
1905 }
1906
b3ca9b02
RW
1907 err = mutex_lock_interruptible(&u->readlock);
1908 if (err) {
1909 err = sock_intr_errno(timeo);
1910 goto out;
1911 }
1da177e4 1912
6eba6a37 1913 do {
1da177e4
LT
1914 int chunk;
1915 struct sk_buff *skb;
1916
3c0d2f37 1917 unix_state_lock(sk);
1da177e4 1918 skb = skb_dequeue(&sk->sk_receive_queue);
6eba6a37 1919 if (skb == NULL) {
25888e30 1920 unix_sk(sk)->recursion_level = 0;
1da177e4 1921 if (copied >= target)
3c0d2f37 1922 goto unlock;
1da177e4
LT
1923
1924 /*
1925 * POSIX 1003.1g mandates this order.
1926 */
ac7bfa62 1927
6eba6a37
ED
1928 err = sock_error(sk);
1929 if (err)
3c0d2f37 1930 goto unlock;
1da177e4 1931 if (sk->sk_shutdown & RCV_SHUTDOWN)
3c0d2f37
MS
1932 goto unlock;
1933
1934 unix_state_unlock(sk);
1da177e4
LT
1935 err = -EAGAIN;
1936 if (!timeo)
1937 break;
57b47a53 1938 mutex_unlock(&u->readlock);
1da177e4
LT
1939
1940 timeo = unix_stream_data_wait(sk, timeo);
1941
b3ca9b02
RW
1942 if (signal_pending(current)
1943 || mutex_lock_interruptible(&u->readlock)) {
1da177e4
LT
1944 err = sock_intr_errno(timeo);
1945 goto out;
1946 }
b3ca9b02 1947
1da177e4 1948 continue;
3c0d2f37
MS
1949 unlock:
1950 unix_state_unlock(sk);
1951 break;
1da177e4 1952 }
3c0d2f37 1953 unix_state_unlock(sk);
1da177e4
LT
1954
1955 if (check_creds) {
1956 /* Never glue messages from different writers */
7361c36c
EB
1957 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1958 (UNIXCB(skb).cred != siocb->scm->cred)) {
1da177e4
LT
1959 skb_queue_head(&sk->sk_receive_queue, skb);
1960 break;
1961 }
1962 } else {
1963 /* Copy credentials */
f78a5fda 1964 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1da177e4
LT
1965 check_creds = 1;
1966 }
1967
1968 /* Copy address just once */
6eba6a37 1969 if (sunaddr) {
1da177e4
LT
1970 unix_copy_addr(msg, skb->sk);
1971 sunaddr = NULL;
1972 }
1973
1974 chunk = min_t(unsigned int, skb->len, size);
1975 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1976 skb_queue_head(&sk->sk_receive_queue, skb);
1977 if (copied == 0)
1978 copied = -EFAULT;
1979 break;
1980 }
1981 copied += chunk;
1982 size -= chunk;
1983
1984 /* Mark read part of skb as used */
6eba6a37 1985 if (!(flags & MSG_PEEK)) {
1da177e4
LT
1986 skb_pull(skb, chunk);
1987
1988 if (UNIXCB(skb).fp)
1989 unix_detach_fds(siocb->scm, skb);
1990
1991 /* put the skb back if we didn't use it up.. */
6eba6a37 1992 if (skb->len) {
1da177e4
LT
1993 skb_queue_head(&sk->sk_receive_queue, skb);
1994 break;
1995 }
1996
70d4bf6d 1997 consume_skb(skb);
1da177e4
LT
1998
1999 if (siocb->scm->fp)
2000 break;
6eba6a37 2001 } else {
1da177e4
LT
2002 /* It is questionable, see note in unix_dgram_recvmsg.
2003 */
2004 if (UNIXCB(skb).fp)
2005 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2006
2007 /* put message back and return */
2008 skb_queue_head(&sk->sk_receive_queue, skb);
2009 break;
2010 }
2011 } while (size);
2012
57b47a53 2013 mutex_unlock(&u->readlock);
1da177e4
LT
2014 scm_recv(sock, msg, siocb->scm, flags);
2015out:
2016 return copied ? : err;
2017}
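The "target" computed above is sock_rcvlowat(sk, flags & MSG_WAITALL, size), so MSG_WAITALL keeps this loop waiting until the whole buffer is filled, the peer shuts down, or a signal arrives. A minimal userspace sketch, with the helper name read_exact purely illustrative:

/* Illustrative userspace sketch, not part of this file. */
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t read_exact(int fd, void *buf, size_t len)
{
	/* May still return short on EOF, shutdown or error. */
	return recv(fd, buf, len, MSG_WAITALL);
}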
2018
2019static int unix_shutdown(struct socket *sock, int mode)
2020{
2021 struct sock *sk = sock->sk;
2022 struct sock *other;
2023
2024 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
2025
7180a031
AC
2026 if (!mode)
2027 return 0;
2028
2029 unix_state_lock(sk);
2030 sk->sk_shutdown |= mode;
2031 other = unix_peer(sk);
2032 if (other)
2033 sock_hold(other);
2034 unix_state_unlock(sk);
2035 sk->sk_state_change(sk);
2036
2037 if (other &&
2038 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2039
2040 int peer_mode = 0;
2041
2042 if (mode&RCV_SHUTDOWN)
2043 peer_mode |= SEND_SHUTDOWN;
2044 if (mode&SEND_SHUTDOWN)
2045 peer_mode |= RCV_SHUTDOWN;
2046 unix_state_lock(other);
2047 other->sk_shutdown |= peer_mode;
2048 unix_state_unlock(other);
2049 other->sk_state_change(other);
2050 if (peer_mode == SHUTDOWN_MASK)
2051 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2052 else if (peer_mode & RCV_SHUTDOWN)
2053 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1da177e4 2054 }
7180a031
AC
2055 if (other)
2056 sock_put(other);
2057
1da177e4
LT
2058 return 0;
2059}
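A small userspace sketch of the peer propagation above, using a socketpair(); the helper name demo_shutdown is illustrative and error handling is reduced to asserts. SHUT_WR on one end sets SEND_SHUTDOWN locally and RCV_SHUTDOWN on the peer, so the peer's read() returns 0 rather than blocking:

/* Illustrative userspace sketch, not part of this file. */
#include <sys/socket.h>
#include <unistd.h>
#include <assert.h>

static void demo_shutdown(void)
{
	int sv[2];
	char c;

	assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
	shutdown(sv[0], SHUT_WR);		/* peer gets RCV_SHUTDOWN */
	assert(read(sv[1], &c, 1) == 0);	/* EOF, not a hang */
	close(sv[0]);
	close(sv[1]);
}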
2060
2061static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2062{
2063 struct sock *sk = sock->sk;
e27dfcea 2064 long amount = 0;
1da177e4
LT
2065 int err;
2066
6eba6a37
ED
2067 switch (cmd) {
2068 case SIOCOUTQ:
31e6d363 2069 amount = sk_wmem_alloc_get(sk);
6eba6a37
ED
2070 err = put_user(amount, (int __user *)arg);
2071 break;
2072 case SIOCINQ:
1da177e4
LT
2073 {
2074 struct sk_buff *skb;
2075
2076 if (sk->sk_state == TCP_LISTEN) {
2077 err = -EINVAL;
2078 break;
2079 }
2080
2081 spin_lock(&sk->sk_receive_queue.lock);
2082 if (sk->sk_type == SOCK_STREAM ||
2083 sk->sk_type == SOCK_SEQPACKET) {
2084 skb_queue_walk(&sk->sk_receive_queue, skb)
2085 amount += skb->len;
2086 } else {
2087 skb = skb_peek(&sk->sk_receive_queue);
2088 if (skb)
e27dfcea 2089 amount = skb->len;
1da177e4
LT
2090 }
2091 spin_unlock(&sk->sk_receive_queue.lock);
2092 err = put_user(amount, (int __user *)arg);
2093 break;
2094 }
2095
6eba6a37
ED
2096 default:
2097 err = -ENOIOCTLCMD;
2098 break;
1da177e4
LT
2099 }
2100 return err;
2101}
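Userspace reaches these two cases through the SIOCINQ and SIOCOUTQ ioctls: bytes queued for reading (summed skb lengths for stream/seqpacket, head skb length for datagram) and bytes not yet consumed by the peer. A minimal sketch (the helper name show_queues is illustrative):

/* Illustrative userspace sketch, not part of this file. */
#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <stdio.h>

static void show_queues(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0 && ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("SIOCINQ=%d SIOCOUTQ=%d\n", inq, outq);
}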
2102
6eba6a37 2103static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
1da177e4
LT
2104{
2105 struct sock *sk = sock->sk;
2106 unsigned int mask;
2107
aa395145 2108 sock_poll_wait(file, sk_sleep(sk), wait);
1da177e4
LT
2109 mask = 0;
2110
2111 /* exceptional events? */
2112 if (sk->sk_err)
2113 mask |= POLLERR;
2114 if (sk->sk_shutdown == SHUTDOWN_MASK)
2115 mask |= POLLHUP;
f348d70a 2116 if (sk->sk_shutdown & RCV_SHUTDOWN)
db40980f 2117 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
1da177e4
LT
2118
2119 /* readable? */
db40980f 2120 if (!skb_queue_empty(&sk->sk_receive_queue))
1da177e4
LT
2121 mask |= POLLIN | POLLRDNORM;
2122
2123 /* Connection-based need to check for termination and startup */
6eba6a37
ED
2124 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2125 sk->sk_state == TCP_CLOSE)
1da177e4
LT
2126 mask |= POLLHUP;
2127
2128 /*
2129 * we set writable also when the other side has shut down the
2130 * connection. This prevents stuck sockets.
2131 */
2132 if (unix_writable(sk))
2133 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2134
2135 return mask;
2136}
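A hedged userspace sketch of observing this mask through poll(2); POLLRDHUP must be requested explicitly and, with glibc, needs _GNU_SOURCE. The helper name wait_readable is illustrative:

/* Illustrative userspace sketch, not part of this file. */
#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>

static void wait_readable(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };

	if (poll(&pfd, 1, 1000) > 0) {
		if (pfd.revents & POLLRDHUP)
			printf("peer closed its sending side\n");
		if (pfd.revents & POLLIN)
			printf("data ready\n");
	}
}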
2137
ec0d215f
RW
2138static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2139 poll_table *wait)
3c73419c 2140{
ec0d215f
RW
2141 struct sock *sk = sock->sk, *other;
2142 unsigned int mask, writable;
3c73419c 2143
aa395145 2144 sock_poll_wait(file, sk_sleep(sk), wait);
3c73419c
RW
2145 mask = 0;
2146
2147 /* exceptional events? */
2148 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2149 mask |= POLLERR;
2150 if (sk->sk_shutdown & RCV_SHUTDOWN)
5456f09a 2151 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
3c73419c
RW
2152 if (sk->sk_shutdown == SHUTDOWN_MASK)
2153 mask |= POLLHUP;
2154
2155 /* readable? */
5456f09a 2156 if (!skb_queue_empty(&sk->sk_receive_queue))
3c73419c
RW
2157 mask |= POLLIN | POLLRDNORM;
2158
2159 /* Connection-based need to check for termination and startup */
2160 if (sk->sk_type == SOCK_SEQPACKET) {
2161 if (sk->sk_state == TCP_CLOSE)
2162 mask |= POLLHUP;
2163 /* connection hasn't started yet? */
2164 if (sk->sk_state == TCP_SYN_SENT)
2165 return mask;
2166 }
2167
973a34aa
ED
2168 /* No write status requested, avoid expensive OUT tests. */
2169 if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2170 return mask;
2171
ec0d215f 2172 writable = unix_writable(sk);
5456f09a
ED
2173 other = unix_peer_get(sk);
2174 if (other) {
2175 if (unix_peer(other) != sk) {
2176 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2177 if (unix_recvq_full(other))
2178 writable = 0;
ec0d215f 2179 }
5456f09a 2180 sock_put(other);
ec0d215f
RW
2181 }
2182
2183 if (writable)
3c73419c
RW
2184 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2185 else
2186 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2187
3c73419c
RW
2188 return mask;
2189}
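The writability gating above means a connected datagram sender is not reported writable while the receiver's queue is full; a non-blocking sender sees this as EAGAIN and can wait on POLLOUT, which the peer_wait registration wakes up. A minimal userspace sketch, with send_or_wait an illustrative helper:

/* Illustrative userspace sketch, not part of this file. */
#include <sys/socket.h>
#include <poll.h>
#include <errno.h>

static ssize_t send_or_wait(int fd, const void *buf, size_t len)
{
	ssize_t n = send(fd, buf, len, MSG_DONTWAIT);

	if (n < 0 && errno == EAGAIN) {
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };

		poll(&pfd, 1, -1);	/* woken when the peer drains its queue */
		n = send(fd, buf, len, MSG_DONTWAIT);
	}
	return n;
}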
1da177e4
LT
2190
2191#ifdef CONFIG_PROC_FS
a53eb3fe
PE
2192static struct sock *first_unix_socket(int *i)
2193{
2194 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2195 if (!hlist_empty(&unix_socket_table[*i]))
2196 return __sk_head(&unix_socket_table[*i]);
2197 }
2198 return NULL;
2199}
2200
2201static struct sock *next_unix_socket(int *i, struct sock *s)
2202{
2203 struct sock *next = sk_next(s);
2204 /* More in this chain? */
2205 if (next)
2206 return next;
2207 /* Look for next non-empty chain. */
2208 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2209 if (!hlist_empty(&unix_socket_table[*i]))
2210 return __sk_head(&unix_socket_table[*i]);
2211 }
2212 return NULL;
2213}
2214
097e66c5 2215struct unix_iter_state {
e372c414 2216 struct seq_net_private p;
097e66c5
DL
2217 int i;
2218};
e27dfcea 2219
1218854a 2220static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
1da177e4 2221{
1218854a 2222 struct unix_iter_state *iter = seq->private;
1da177e4
LT
2223 loff_t off = 0;
2224 struct sock *s;
2225
097e66c5 2226 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
1218854a 2227 if (sock_net(s) != seq_file_net(seq))
097e66c5 2228 continue;
ac7bfa62 2229 if (off == pos)
1da177e4
LT
2230 return s;
2231 ++off;
2232 }
2233 return NULL;
2234}
2235
1da177e4 2236static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
9a429c49 2237 __acquires(unix_table_lock)
1da177e4 2238{
fbe9cc4a 2239 spin_lock(&unix_table_lock);
b9f3124f 2240 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1da177e4
LT
2241}
2242
2243static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2244{
097e66c5
DL
2245 struct unix_iter_state *iter = seq->private;
2246 struct sock *sk = v;
1da177e4
LT
2247 ++*pos;
2248
b9f3124f 2249 if (v == SEQ_START_TOKEN)
097e66c5
DL
2250 sk = first_unix_socket(&iter->i);
2251 else
2252 sk = next_unix_socket(&iter->i, sk);
1218854a 2253 while (sk && (sock_net(sk) != seq_file_net(seq)))
097e66c5
DL
2254 sk = next_unix_socket(&iter->i, sk);
2255 return sk;
1da177e4
LT
2256}
2257
2258static void unix_seq_stop(struct seq_file *seq, void *v)
9a429c49 2259 __releases(unix_table_lock)
1da177e4 2260{
fbe9cc4a 2261 spin_unlock(&unix_table_lock);
1da177e4
LT
2262}
2263
2264static int unix_seq_show(struct seq_file *seq, void *v)
2265{
ac7bfa62 2266
b9f3124f 2267 if (v == SEQ_START_TOKEN)
1da177e4
LT
2268 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2269 "Inode Path\n");
2270 else {
2271 struct sock *s = v;
2272 struct unix_sock *u = unix_sk(s);
1c92b4e5 2273 unix_state_lock(s);
1da177e4 2274
71338aa7 2275 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
1da177e4
LT
2276 s,
2277 atomic_read(&s->sk_refcnt),
2278 0,
2279 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2280 s->sk_type,
2281 s->sk_socket ?
2282 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2283 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2284 sock_i_ino(s));
2285
2286 if (u->addr) {
2287 int i, len;
2288 seq_putc(seq, ' ');
2289
2290 i = 0;
2291 len = u->addr->len - sizeof(short);
2292 if (!UNIX_ABSTRACT(s))
2293 len--;
2294 else {
2295 seq_putc(seq, '@');
2296 i++;
2297 }
2298 for ( ; i < len; i++)
2299 seq_putc(seq, u->addr->name->sun_path[i]);
2300 }
1c92b4e5 2301 unix_state_unlock(s);
1da177e4
LT
2302 seq_putc(seq, '\n');
2303 }
2304
2305 return 0;
2306}
2307
56b3d975 2308static const struct seq_operations unix_seq_ops = {
1da177e4
LT
2309 .start = unix_seq_start,
2310 .next = unix_seq_next,
2311 .stop = unix_seq_stop,
2312 .show = unix_seq_show,
2313};
2314
1da177e4
LT
2315static int unix_seq_open(struct inode *inode, struct file *file)
2316{
e372c414
DL
2317 return seq_open_net(inode, file, &unix_seq_ops,
2318 sizeof(struct unix_iter_state));
1da177e4
LT
2319}
2320
da7071d7 2321static const struct file_operations unix_seq_fops = {
1da177e4
LT
2322 .owner = THIS_MODULE,
2323 .open = unix_seq_open,
2324 .read = seq_read,
2325 .llseek = seq_lseek,
e372c414 2326 .release = seq_release_net,
1da177e4
LT
2327};
2328
2329#endif
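The seq_file registered above appears as /proc/net/unix, one line per socket in the format printed by unix_seq_show() under the "Num RefCount Protocol Flags Type St Inode Path" header. A trivial userspace sketch that simply dumps it (dump_unix_sockets is an illustrative name):

/* Illustrative userspace sketch, not part of this file. */
#include <stdio.h>

static void dump_unix_sockets(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/unix", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}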
2330
ec1b4cf7 2331static const struct net_proto_family unix_family_ops = {
1da177e4
LT
2332 .family = PF_UNIX,
2333 .create = unix_create,
2334 .owner = THIS_MODULE,
2335};
2336
097e66c5 2337
2c8c1e72 2338static int __net_init unix_net_init(struct net *net)
097e66c5
DL
2339{
2340 int error = -ENOMEM;
2341
a0a53c8b 2342 net->unx.sysctl_max_dgram_qlen = 10;
1597fbc0
PE
2343 if (unix_sysctl_register(net))
2344 goto out;
d392e497 2345
097e66c5 2346#ifdef CONFIG_PROC_FS
1597fbc0
PE
2347 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2348 unix_sysctl_unregister(net);
097e66c5 2349 goto out;
1597fbc0 2350 }
097e66c5
DL
2351#endif
2352 error = 0;
2353out:
48dcc33e 2354 return error;
097e66c5
DL
2355}
2356
2c8c1e72 2357static void __net_exit unix_net_exit(struct net *net)
097e66c5 2358{
1597fbc0 2359 unix_sysctl_unregister(net);
097e66c5
DL
2360 proc_net_remove(net, "unix");
2361}
2362
2363static struct pernet_operations unix_net_ops = {
2364 .init = unix_net_init,
2365 .exit = unix_net_exit,
2366};
2367
1da177e4
LT
2368static int __init af_unix_init(void)
2369{
2370 int rc = -1;
2371 struct sk_buff *dummy_skb;
2372
ef047f5e 2373 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
1da177e4
LT
2374
2375 rc = proto_register(&unix_proto, 1);
ac7bfa62
YH
2376 if (rc != 0) {
2377 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
0dc47877 2378 __func__);
1da177e4
LT
2379 goto out;
2380 }
2381
2382 sock_register(&unix_family_ops);
097e66c5 2383 register_pernet_subsys(&unix_net_ops);
1da177e4
LT
2384out:
2385 return rc;
2386}
2387
2388static void __exit af_unix_exit(void)
2389{
2390 sock_unregister(PF_UNIX);
1da177e4 2391 proto_unregister(&unix_proto);
097e66c5 2392 unregister_pernet_subsys(&unix_net_ops);
1da177e4
LT
2393}
2394
3d366960
DW
2395/* Earlier than device_initcall() so that other drivers invoking
2396 request_module() don't end up in a loop when modprobe tries
2397 to use a UNIX socket. But later than subsys_initcall() because
2398 we depend on stuff initialised there */
2399fs_initcall(af_unix_init);
1da177e4
LT
2400module_exit(af_unix_exit);
2401
2402MODULE_LICENSE("GPL");
2403MODULE_ALIAS_NETPROTO(PF_UNIX);