net: af_unix should use KERN_INFO instead of KERN_DEBUG
[deliverable/linux.git] / net / unix / af_unix.c
1da177e4
LT
1/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
113aa838 4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
1da177e4
LT
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
1da177e4
LT
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * starting with a 0 byte, so that this name space does not intersect
80 * with BSD names.
81 */
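As a userspace illustration of the abstract namespace described above (a minimal sketch, not part of this kernel file; the filesystem path and abstract name are hypothetical and error handling is omitted): a filesystem binding uses a NUL-terminated sun_path, while an abstract binding starts sun_path with a 0 byte and passes only the bytes actually used in the address length.

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
    struct sockaddr_un fs = { .sun_family = AF_UNIX };
    struct sockaddr_un ab = { .sun_family = AF_UNIX };
    int a = socket(AF_UNIX, SOCK_STREAM, 0);
    int b = socket(AF_UNIX, SOCK_STREAM, 0);

    /* Filesystem name: NUL-terminated path, visible in the filesystem. */
    strcpy(fs.sun_path, "/tmp/example.sock");
    bind(a, (struct sockaddr *)&fs, sizeof(fs));

    /* Abstract name: leading 0 byte, not NUL-terminated; the address
     * length covers only the family plus the bytes of the name. */
    memcpy(ab.sun_path, "\0example-abstract", 17);
    bind(b, (struct sockaddr *)&ab,
         offsetof(struct sockaddr_un, sun_path) + 17);

    close(a);
    close(b);
    unlink("/tmp/example.sock");
    return 0;
}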
82
83#include <linux/module.h>
1da177e4 84#include <linux/kernel.h>
1da177e4
LT
85#include <linux/signal.h>
86#include <linux/sched.h>
87#include <linux/errno.h>
88#include <linux/string.h>
89#include <linux/stat.h>
90#include <linux/dcache.h>
91#include <linux/namei.h>
92#include <linux/socket.h>
93#include <linux/un.h>
94#include <linux/fcntl.h>
95#include <linux/termios.h>
96#include <linux/sockios.h>
97#include <linux/net.h>
98#include <linux/in.h>
99#include <linux/fs.h>
100#include <linux/slab.h>
101#include <asm/uaccess.h>
102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
457c4cbc 104#include <net/net_namespace.h>
1da177e4 105#include <net/sock.h>
c752f073 106#include <net/tcp_states.h>
1da177e4
LT
107#include <net/af_unix.h>
108#include <linux/proc_fs.h>
109#include <linux/seq_file.h>
110#include <net/scm.h>
111#include <linux/init.h>
112#include <linux/poll.h>
1da177e4
LT
113#include <linux/rtnetlink.h>
114#include <linux/mount.h>
115#include <net/checksum.h>
116#include <linux/security.h>
117
13111698
AB
118static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119static DEFINE_SPINLOCK(unix_table_lock);
1da177e4
LT
120static atomic_t unix_nr_socks = ATOMIC_INIT(0);
121
122#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
877ce7c1 126#ifdef CONFIG_SECURITY_NETWORK
dc49c1f9 127static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
877ce7c1 128{
dc49c1f9 129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
877ce7c1
CZ
130}
131
132static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133{
dc49c1f9 134 scm->secid = *UNIXSID(skb);
877ce7c1
CZ
135}
136#else
dc49c1f9 137static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
877ce7c1
CZ
138{ }
139
140static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141{ }
142#endif /* CONFIG_SECURITY_NETWORK */
143
1da177e4
LT
144/*
145 * SMP locking strategy:
fbe9cc4a 146 * hash table is protected with spinlock unix_table_lock
1da177e4
LT
147 * each socket state is protected by separate rwlock.
148 */
149
44bb9363 150static inline unsigned unix_hash_fold(__wsum n)
1da177e4 151{
44bb9363 152 unsigned hash = (__force unsigned)n;
1da177e4
LT
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156}
157
158#define unix_peer(sk) (unix_sk(sk)->peer)
159
160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161{
162 return unix_peer(osk) == sk;
163}
164
165static inline int unix_may_send(struct sock *sk, struct sock *osk)
166{
6eba6a37 167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
1da177e4
LT
168}
169
3c73419c
RW
170static inline int unix_recvq_full(struct sock const *sk)
171{
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173}
174
1da177e4
LT
175static struct sock *unix_peer_get(struct sock *s)
176{
177 struct sock *peer;
178
1c92b4e5 179 unix_state_lock(s);
1da177e4
LT
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
1c92b4e5 183 unix_state_unlock(s);
1da177e4
LT
184 return peer;
185}
186
187static inline void unix_release_addr(struct unix_address *addr)
188{
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191}
192
193/*
194 * Check unix socket name:
195 * - should not be zero length.
196 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
197 * - if it starts with a zero byte, it is an abstract name.
198 */
ac7bfa62 199
6eba6a37 200static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
1da177e4
LT
201{
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However, in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
e27dfcea 214 ((char *)sunaddr)[len] = 0;
1da177e4
LT
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
07f0757a 219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
1da177e4
LT
220 return len;
221}
222
223static void __unix_remove_socket(struct sock *sk)
224{
225 sk_del_node_init(sk);
226}
227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{
547b792c 230 WARN_ON(!sk_unhashed(sk));
1da177e4
LT
231 sk_add_node(sk, list);
232}
233
234static inline void unix_remove_socket(struct sock *sk)
235{
fbe9cc4a 236 spin_lock(&unix_table_lock);
1da177e4 237 __unix_remove_socket(sk);
fbe9cc4a 238 spin_unlock(&unix_table_lock);
1da177e4
LT
239}
240
241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242{
fbe9cc4a 243 spin_lock(&unix_table_lock);
1da177e4 244 __unix_insert_socket(list, sk);
fbe9cc4a 245 spin_unlock(&unix_table_lock);
1da177e4
LT
246}
247
097e66c5
DL
248static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
1da177e4
LT
250 int len, int type, unsigned hash)
251{
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
878628fb 258 if (!net_eq(sock_net(s), net))
097e66c5
DL
259 continue;
260
1da177e4
LT
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266found:
267 return s;
268}
269
097e66c5
DL
270static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
1da177e4
LT
272 int len, int type,
273 unsigned hash)
274{
275 struct sock *s;
276
fbe9cc4a 277 spin_lock(&unix_table_lock);
097e66c5 278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
1da177e4
LT
279 if (s)
280 sock_hold(s);
fbe9cc4a 281 spin_unlock(&unix_table_lock);
1da177e4
LT
282 return s;
283}
284
097e66c5 285static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
1da177e4
LT
286{
287 struct sock *s;
288 struct hlist_node *node;
289
fbe9cc4a 290 spin_lock(&unix_table_lock);
1da177e4
LT
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
878628fb 295 if (!net_eq(sock_net(s), net))
097e66c5
DL
296 continue;
297
6eba6a37 298 if (dentry && dentry->d_inode == i) {
1da177e4
LT
299 sock_hold(s);
300 goto found;
301 }
302 }
303 s = NULL;
304found:
fbe9cc4a 305 spin_unlock(&unix_table_lock);
1da177e4
LT
306 return s;
307}
308
309static inline int unix_writable(struct sock *sk)
310{
311 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
312}
313
314static void unix_write_space(struct sock *sk)
315{
316 read_lock(&sk->sk_callback_lock);
317 if (unix_writable(sk)) {
318 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
71e20f18 319 wake_up_interruptible_sync(sk->sk_sleep);
8d8ad9d7 320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1da177e4
LT
321 }
322 read_unlock(&sk->sk_callback_lock);
323}
324
325/* When a dgram socket disconnects (or changes its peer), we clear its receive
326 * queue of packets that arrived from the previous peer. First, this allows us to do
327 * flow control based only on wmem_alloc; second, an sk connected to a peer
328 * may receive messages only from that peer. */
329static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
330{
b03efcfb 331 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1da177e4
LT
332 skb_queue_purge(&sk->sk_receive_queue);
333 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
334
335 /* If one link of a bidirectional dgram pipe is disconnected,
336 * we signal an error. Messages are lost. Do not do this
337 * when the peer was not connected to us.
338 */
339 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
340 other->sk_err = ECONNRESET;
341 other->sk_error_report(other);
342 }
343 }
344}
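To see the behaviour described in the comment above from userspace (a sketch, not part of this file; the /tmp paths are hypothetical and error handling is omitted): when a connected datagram socket switches to a new peer while datagrams from the old peer are still queued, the queue is purged and the old peer's next receive fails with ECONNRESET.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int bound_dgram(const char *path)
{
    struct sockaddr_un a = { .sun_family = AF_UNIX };
    int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

    strcpy(a.sun_path, path);
    unlink(path);
    bind(fd, (struct sockaddr *)&a, sizeof(a));
    return fd;
}

static void dgram_connect(int fd, const char *path)
{
    struct sockaddr_un a = { .sun_family = AF_UNIX };

    strcpy(a.sun_path, path);
    connect(fd, (struct sockaddr *)&a, sizeof(a));
}

int main(void)
{
    int a = bound_dgram("/tmp/dgram-a");
    int b = bound_dgram("/tmp/dgram-b");
    int c = bound_dgram("/tmp/dgram-c");
    char buf[16];

    dgram_connect(a, "/tmp/dgram-b");
    dgram_connect(b, "/tmp/dgram-a");   /* a <-> b */
    send(b, "hi", 2, 0);                /* queued on a, never read */

    dgram_connect(a, "/tmp/dgram-c");   /* a changes peer: a's queue is
                                           purged and b is signalled */
    if (recv(b, buf, sizeof(buf), MSG_DONTWAIT) < 0)
        printf("old peer sees: %s\n", strerror(errno)); /* ECONNRESET */

    close(a); close(b); close(c);
    return 0;
}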
345
346static void unix_sock_destructor(struct sock *sk)
347{
348 struct unix_sock *u = unix_sk(sk);
349
350 skb_queue_purge(&sk->sk_receive_queue);
351
547b792c
IJ
352 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
353 WARN_ON(!sk_unhashed(sk));
354 WARN_ON(sk->sk_socket);
1da177e4 355 if (!sock_flag(sk, SOCK_DEAD)) {
6b41e7dd 356 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
1da177e4
LT
357 return;
358 }
359
360 if (u->addr)
361 unix_release_addr(u->addr);
362
363 atomic_dec(&unix_nr_socks);
a8076d8d 364 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
1da177e4 365#ifdef UNIX_REFCNT_DEBUG
6eba6a37
ED
366 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
367 atomic_read(&unix_nr_socks));
1da177e4
LT
368#endif
369}
370
6eba6a37 371static int unix_release_sock(struct sock *sk, int embrion)
1da177e4
LT
372{
373 struct unix_sock *u = unix_sk(sk);
374 struct dentry *dentry;
375 struct vfsmount *mnt;
376 struct sock *skpair;
377 struct sk_buff *skb;
378 int state;
379
380 unix_remove_socket(sk);
381
382 /* Clear state */
1c92b4e5 383 unix_state_lock(sk);
1da177e4
LT
384 sock_orphan(sk);
385 sk->sk_shutdown = SHUTDOWN_MASK;
386 dentry = u->dentry;
387 u->dentry = NULL;
388 mnt = u->mnt;
389 u->mnt = NULL;
390 state = sk->sk_state;
391 sk->sk_state = TCP_CLOSE;
1c92b4e5 392 unix_state_unlock(sk);
1da177e4
LT
393
394 wake_up_interruptible_all(&u->peer_wait);
395
e27dfcea 396 skpair = unix_peer(sk);
1da177e4 397
e27dfcea 398 if (skpair != NULL) {
1da177e4 399 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
1c92b4e5 400 unix_state_lock(skpair);
1da177e4
LT
401 /* No more writes */
402 skpair->sk_shutdown = SHUTDOWN_MASK;
403 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
404 skpair->sk_err = ECONNRESET;
1c92b4e5 405 unix_state_unlock(skpair);
1da177e4
LT
406 skpair->sk_state_change(skpair);
407 read_lock(&skpair->sk_callback_lock);
8d8ad9d7 408 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
1da177e4
LT
409 read_unlock(&skpair->sk_callback_lock);
410 }
411 sock_put(skpair); /* It may now die */
412 unix_peer(sk) = NULL;
413 }
414
415 /* Try to flush out this socket. Throw out buffers at least */
416
417 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
e27dfcea 418 if (state == TCP_LISTEN)
1da177e4
LT
419 unix_release_sock(skb->sk, 1);
420 /* passed fds are erased in the kfree_skb hook */
421 kfree_skb(skb);
422 }
423
424 if (dentry) {
425 dput(dentry);
426 mntput(mnt);
427 }
428
429 sock_put(sk);
430
431 /* ---- Socket is dead now and most probably destroyed ---- */
432
433 /*
434 * Fixme: BSD difference: In BSD all sockets connected to us get
435 * ECONNRESET and we die on the spot. In Linux we behave
436 * like files and pipes do and wait for the last
437 * dereference.
438 *
439 * Can't we simply set sock->err?
440 *
441 * What does the above comment talk about? --ANK(980817)
442 */
443
9305cfa4 444 if (unix_tot_inflight)
ac7bfa62 445 unix_gc(); /* Garbage collect fds */
1da177e4
LT
446
447 return 0;
448}
449
450static int unix_listen(struct socket *sock, int backlog)
451{
452 int err;
453 struct sock *sk = sock->sk;
454 struct unix_sock *u = unix_sk(sk);
455
456 err = -EOPNOTSUPP;
6eba6a37
ED
457 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
458 goto out; /* Only stream/seqpacket sockets accept */
1da177e4
LT
459 err = -EINVAL;
460 if (!u->addr)
6eba6a37 461 goto out; /* No listens on an unbound socket */
1c92b4e5 462 unix_state_lock(sk);
1da177e4
LT
463 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
464 goto out_unlock;
465 if (backlog > sk->sk_max_ack_backlog)
466 wake_up_interruptible_all(&u->peer_wait);
467 sk->sk_max_ack_backlog = backlog;
468 sk->sk_state = TCP_LISTEN;
469 /* set credentials so connect can copy them */
b488893a 470 sk->sk_peercred.pid = task_tgid_vnr(current);
1da177e4
LT
471 sk->sk_peercred.uid = current->euid;
472 sk->sk_peercred.gid = current->egid;
473 err = 0;
474
475out_unlock:
1c92b4e5 476 unix_state_unlock(sk);
1da177e4
LT
477out:
478 return err;
479}
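The credentials recorded here (and copied to the new socket at connect time) are what userspace reads back with SO_PEERCRED. A minimal helper sketch, not part of this file, for a connected AF_UNIX socket:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

/* Print the peer's pid/uid/gid as recorded at listen()/connect() time. */
static void print_peer_creds(int fd)
{
    struct ucred cred;
    socklen_t len = sizeof(cred);

    if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) == 0)
        printf("peer pid=%d uid=%d gid=%d\n",
               (int)cred.pid, (int)cred.uid, (int)cred.gid);
}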
480
481static int unix_release(struct socket *);
482static int unix_bind(struct socket *, struct sockaddr *, int);
483static int unix_stream_connect(struct socket *, struct sockaddr *,
484 int addr_len, int flags);
485static int unix_socketpair(struct socket *, struct socket *);
486static int unix_accept(struct socket *, struct socket *, int);
487static int unix_getname(struct socket *, struct sockaddr *, int *, int);
488static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
ec0d215f
RW
489static unsigned int unix_dgram_poll(struct file *, struct socket *,
490 poll_table *);
1da177e4
LT
491static int unix_ioctl(struct socket *, unsigned int, unsigned long);
492static int unix_shutdown(struct socket *, int);
493static int unix_stream_sendmsg(struct kiocb *, struct socket *,
494 struct msghdr *, size_t);
495static int unix_stream_recvmsg(struct kiocb *, struct socket *,
496 struct msghdr *, size_t, int);
497static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
498 struct msghdr *, size_t);
499static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
500 struct msghdr *, size_t, int);
501static int unix_dgram_connect(struct socket *, struct sockaddr *,
502 int, int);
503static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
504 struct msghdr *, size_t);
505
90ddc4f0 506static const struct proto_ops unix_stream_ops = {
1da177e4
LT
507 .family = PF_UNIX,
508 .owner = THIS_MODULE,
509 .release = unix_release,
510 .bind = unix_bind,
511 .connect = unix_stream_connect,
512 .socketpair = unix_socketpair,
513 .accept = unix_accept,
514 .getname = unix_getname,
515 .poll = unix_poll,
516 .ioctl = unix_ioctl,
517 .listen = unix_listen,
518 .shutdown = unix_shutdown,
519 .setsockopt = sock_no_setsockopt,
520 .getsockopt = sock_no_getsockopt,
521 .sendmsg = unix_stream_sendmsg,
522 .recvmsg = unix_stream_recvmsg,
523 .mmap = sock_no_mmap,
524 .sendpage = sock_no_sendpage,
525};
526
90ddc4f0 527static const struct proto_ops unix_dgram_ops = {
1da177e4
LT
528 .family = PF_UNIX,
529 .owner = THIS_MODULE,
530 .release = unix_release,
531 .bind = unix_bind,
532 .connect = unix_dgram_connect,
533 .socketpair = unix_socketpair,
534 .accept = sock_no_accept,
535 .getname = unix_getname,
ec0d215f 536 .poll = unix_dgram_poll,
1da177e4
LT
537 .ioctl = unix_ioctl,
538 .listen = sock_no_listen,
539 .shutdown = unix_shutdown,
540 .setsockopt = sock_no_setsockopt,
541 .getsockopt = sock_no_getsockopt,
542 .sendmsg = unix_dgram_sendmsg,
543 .recvmsg = unix_dgram_recvmsg,
544 .mmap = sock_no_mmap,
545 .sendpage = sock_no_sendpage,
546};
547
90ddc4f0 548static const struct proto_ops unix_seqpacket_ops = {
1da177e4
LT
549 .family = PF_UNIX,
550 .owner = THIS_MODULE,
551 .release = unix_release,
552 .bind = unix_bind,
553 .connect = unix_stream_connect,
554 .socketpair = unix_socketpair,
555 .accept = unix_accept,
556 .getname = unix_getname,
ec0d215f 557 .poll = unix_dgram_poll,
1da177e4
LT
558 .ioctl = unix_ioctl,
559 .listen = unix_listen,
560 .shutdown = unix_shutdown,
561 .setsockopt = sock_no_setsockopt,
562 .getsockopt = sock_no_getsockopt,
563 .sendmsg = unix_seqpacket_sendmsg,
564 .recvmsg = unix_dgram_recvmsg,
565 .mmap = sock_no_mmap,
566 .sendpage = sock_no_sendpage,
567};
568
569static struct proto unix_proto = {
248969ae
ED
570 .name = "UNIX",
571 .owner = THIS_MODULE,
572 .sockets_allocated = &unix_nr_socks,
573 .obj_size = sizeof(struct unix_sock),
1da177e4
LT
574};
575
a09785a2
IM
576/*
577 * AF_UNIX sockets do not interact with hardware, hence they
578 * dont trigger interrupts - so it's safe for them to have
579 * bh-unsafe locking for their sk_receive_queue.lock. Split off
580 * this special lock-class by reinitializing the spinlock key:
581 */
582static struct lock_class_key af_unix_sk_receive_queue_lock_key;
583
6eba6a37 584static struct sock *unix_create1(struct net *net, struct socket *sock)
1da177e4
LT
585{
586 struct sock *sk = NULL;
587 struct unix_sock *u;
588
284b327b
PE
589 atomic_inc(&unix_nr_socks);
590 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
1da177e4
LT
591 goto out;
592
6257ff21 593 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
1da177e4
LT
594 if (!sk)
595 goto out;
596
6eba6a37 597 sock_init_data(sock, sk);
a09785a2
IM
598 lockdep_set_class(&sk->sk_receive_queue.lock,
599 &af_unix_sk_receive_queue_lock_key);
1da177e4
LT
600
601 sk->sk_write_space = unix_write_space;
a0a53c8b 602 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
1da177e4
LT
603 sk->sk_destruct = unix_sock_destructor;
604 u = unix_sk(sk);
605 u->dentry = NULL;
606 u->mnt = NULL;
fd19f329 607 spin_lock_init(&u->lock);
516e0cc5 608 atomic_long_set(&u->inflight, 0);
1fd05ba5 609 INIT_LIST_HEAD(&u->link);
57b47a53 610 mutex_init(&u->readlock); /* single task reading lock */
1da177e4
LT
611 init_waitqueue_head(&u->peer_wait);
612 unix_insert_socket(unix_sockets_unbound, sk);
613out:
284b327b
PE
614 if (sk == NULL)
615 atomic_dec(&unix_nr_socks);
a8076d8d
ED
616 else
617 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
618
1da177e4
LT
619 return sk;
620}
621
1b8d7ae4 622static int unix_create(struct net *net, struct socket *sock, int protocol)
1da177e4
LT
623{
624 if (protocol && protocol != PF_UNIX)
625 return -EPROTONOSUPPORT;
626
627 sock->state = SS_UNCONNECTED;
628
629 switch (sock->type) {
630 case SOCK_STREAM:
631 sock->ops = &unix_stream_ops;
632 break;
633 /*
634 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
635 * nothing uses it.
636 */
637 case SOCK_RAW:
e27dfcea 638 sock->type = SOCK_DGRAM;
1da177e4
LT
639 case SOCK_DGRAM:
640 sock->ops = &unix_dgram_ops;
641 break;
642 case SOCK_SEQPACKET:
643 sock->ops = &unix_seqpacket_ops;
644 break;
645 default:
646 return -ESOCKTNOSUPPORT;
647 }
648
1b8d7ae4 649 return unix_create1(net, sock) ? 0 : -ENOMEM;
1da177e4
LT
650}
651
652static int unix_release(struct socket *sock)
653{
654 struct sock *sk = sock->sk;
655
656 if (!sk)
657 return 0;
658
659 sock->sk = NULL;
660
6eba6a37 661 return unix_release_sock(sk, 0);
1da177e4
LT
662}
663
664static int unix_autobind(struct socket *sock)
665{
666 struct sock *sk = sock->sk;
3b1e0a65 667 struct net *net = sock_net(sk);
1da177e4
LT
668 struct unix_sock *u = unix_sk(sk);
669 static u32 ordernum = 1;
6eba6a37 670 struct unix_address *addr;
1da177e4
LT
671 int err;
672
57b47a53 673 mutex_lock(&u->readlock);
1da177e4
LT
674
675 err = 0;
676 if (u->addr)
677 goto out;
678
679 err = -ENOMEM;
0da974f4 680 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
1da177e4
LT
681 if (!addr)
682 goto out;
683
1da177e4
LT
684 addr->name->sun_family = AF_UNIX;
685 atomic_set(&addr->refcnt, 1);
686
687retry:
688 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
07f0757a 689 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
1da177e4 690
fbe9cc4a 691 spin_lock(&unix_table_lock);
1da177e4
LT
692 ordernum = (ordernum+1)&0xFFFFF;
693
097e66c5 694 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
1da177e4 695 addr->hash)) {
fbe9cc4a 696 spin_unlock(&unix_table_lock);
1da177e4
LT
697 /* Sanity yield. It is an unusual case, but still... */
698 if (!(ordernum&0xFF))
699 yield();
700 goto retry;
701 }
702 addr->hash ^= sk->sk_type;
703
704 __unix_remove_socket(sk);
705 u->addr = addr;
706 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
fbe9cc4a 707 spin_unlock(&unix_table_lock);
1da177e4
LT
708 err = 0;
709
57b47a53 710out: mutex_unlock(&u->readlock);
1da177e4
LT
711 return err;
712}
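From userspace, autobind is triggered by passing only the address family to bind(); getsockname() then reports the kernel-chosen abstract name (a 0 byte followed by five hex digits, as generated above). A minimal sketch, not part of this file:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    struct sockaddr_un addr = { .sun_family = AF_UNIX };
    socklen_t len = sizeof(addr);
    int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

    bind(fd, (struct sockaddr *)&addr, sizeof(sa_family_t)); /* autobind */
    getsockname(fd, (struct sockaddr *)&addr, &len);
    /* addr.sun_path[0] is 0; the generated name follows it. */
    printf("autobound to \"\\0%.*s\"\n",
           (int)(len - sizeof(sa_family_t) - 1), addr.sun_path + 1);
    return 0;
}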
713
097e66c5
DL
714static struct sock *unix_find_other(struct net *net,
715 struct sockaddr_un *sunname, int len,
1da177e4
LT
716 int type, unsigned hash, int *error)
717{
718 struct sock *u;
421748ec 719 struct path path;
1da177e4 720 int err = 0;
ac7bfa62 721
1da177e4 722 if (sunname->sun_path[0]) {
421748ec
AV
723 struct inode *inode;
724 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
1da177e4
LT
725 if (err)
726 goto fail;
421748ec
AV
727 inode = path.dentry->d_inode;
728 err = inode_permission(inode, MAY_WRITE);
1da177e4
LT
729 if (err)
730 goto put_fail;
731
732 err = -ECONNREFUSED;
421748ec 733 if (!S_ISSOCK(inode->i_mode))
1da177e4 734 goto put_fail;
421748ec 735 u = unix_find_socket_byinode(net, inode);
1da177e4
LT
736 if (!u)
737 goto put_fail;
738
739 if (u->sk_type == type)
421748ec 740 touch_atime(path.mnt, path.dentry);
1da177e4 741
421748ec 742 path_put(&path);
1da177e4 743
e27dfcea 744 err = -EPROTOTYPE;
1da177e4
LT
745 if (u->sk_type != type) {
746 sock_put(u);
747 goto fail;
748 }
749 } else {
750 err = -ECONNREFUSED;
e27dfcea 751 u = unix_find_socket_byname(net, sunname, len, type, hash);
1da177e4
LT
752 if (u) {
753 struct dentry *dentry;
754 dentry = unix_sk(u)->dentry;
755 if (dentry)
756 touch_atime(unix_sk(u)->mnt, dentry);
757 } else
758 goto fail;
759 }
760 return u;
761
762put_fail:
421748ec 763 path_put(&path);
1da177e4 764fail:
e27dfcea 765 *error = err;
1da177e4
LT
766 return NULL;
767}
768
769
770static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
771{
772 struct sock *sk = sock->sk;
3b1e0a65 773 struct net *net = sock_net(sk);
1da177e4 774 struct unix_sock *u = unix_sk(sk);
e27dfcea 775 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
6eba6a37 776 struct dentry *dentry = NULL;
1da177e4
LT
777 struct nameidata nd;
778 int err;
779 unsigned hash;
780 struct unix_address *addr;
781 struct hlist_head *list;
782
783 err = -EINVAL;
784 if (sunaddr->sun_family != AF_UNIX)
785 goto out;
786
e27dfcea 787 if (addr_len == sizeof(short)) {
1da177e4
LT
788 err = unix_autobind(sock);
789 goto out;
790 }
791
792 err = unix_mkname(sunaddr, addr_len, &hash);
793 if (err < 0)
794 goto out;
795 addr_len = err;
796
57b47a53 797 mutex_lock(&u->readlock);
1da177e4
LT
798
799 err = -EINVAL;
800 if (u->addr)
801 goto out_up;
802
803 err = -ENOMEM;
804 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
805 if (!addr)
806 goto out_up;
807
808 memcpy(addr->name, sunaddr, addr_len);
809 addr->len = addr_len;
810 addr->hash = hash ^ sk->sk_type;
811 atomic_set(&addr->refcnt, 1);
812
813 if (sunaddr->sun_path[0]) {
814 unsigned int mode;
815 err = 0;
816 /*
817 * Get the parent directory, calculate the hash for last
818 * component.
819 */
820 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
821 if (err)
822 goto out_mknod_parent;
f81a0bff
CH
823
824 dentry = lookup_create(&nd, 0);
1da177e4
LT
825 err = PTR_ERR(dentry);
826 if (IS_ERR(dentry))
827 goto out_mknod_unlock;
f81a0bff 828
1da177e4
LT
829 /*
830 * All right, let's create it.
831 */
832 mode = S_IFSOCK |
833 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
463c3197
DH
834 err = mnt_want_write(nd.path.mnt);
835 if (err)
836 goto out_mknod_dput;
4ac91378 837 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
463c3197 838 mnt_drop_write(nd.path.mnt);
1da177e4
LT
839 if (err)
840 goto out_mknod_dput;
4ac91378
JB
841 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
842 dput(nd.path.dentry);
843 nd.path.dentry = dentry;
1da177e4
LT
844
845 addr->hash = UNIX_HASH_SIZE;
846 }
847
fbe9cc4a 848 spin_lock(&unix_table_lock);
1da177e4
LT
849
850 if (!sunaddr->sun_path[0]) {
851 err = -EADDRINUSE;
097e66c5 852 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1da177e4
LT
853 sk->sk_type, hash)) {
854 unix_release_addr(addr);
855 goto out_unlock;
856 }
857
858 list = &unix_socket_table[addr->hash];
859 } else {
860 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
4ac91378
JB
861 u->dentry = nd.path.dentry;
862 u->mnt = nd.path.mnt;
1da177e4
LT
863 }
864
865 err = 0;
866 __unix_remove_socket(sk);
867 u->addr = addr;
868 __unix_insert_socket(list, sk);
869
870out_unlock:
fbe9cc4a 871 spin_unlock(&unix_table_lock);
1da177e4 872out_up:
57b47a53 873 mutex_unlock(&u->readlock);
1da177e4
LT
874out:
875 return err;
876
877out_mknod_dput:
878 dput(dentry);
879out_mknod_unlock:
4ac91378 880 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
1d957f9b 881 path_put(&nd.path);
1da177e4 882out_mknod_parent:
e27dfcea
JK
883 if (err == -EEXIST)
884 err = -EADDRINUSE;
1da177e4
LT
885 unix_release_addr(addr);
886 goto out_up;
887}
888
278a3de5
DM
889static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
890{
891 if (unlikely(sk1 == sk2) || !sk2) {
892 unix_state_lock(sk1);
893 return;
894 }
895 if (sk1 < sk2) {
896 unix_state_lock(sk1);
897 unix_state_lock_nested(sk2);
898 } else {
899 unix_state_lock(sk2);
900 unix_state_lock_nested(sk1);
901 }
902}
903
904static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
905{
906 if (unlikely(sk1 == sk2) || !sk2) {
907 unix_state_unlock(sk1);
908 return;
909 }
910 unix_state_unlock(sk1);
911 unix_state_unlock(sk2);
912}
913
1da177e4
LT
914static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
915 int alen, int flags)
916{
917 struct sock *sk = sock->sk;
3b1e0a65 918 struct net *net = sock_net(sk);
e27dfcea 919 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1da177e4
LT
920 struct sock *other;
921 unsigned hash;
922 int err;
923
924 if (addr->sa_family != AF_UNSPEC) {
925 err = unix_mkname(sunaddr, alen, &hash);
926 if (err < 0)
927 goto out;
928 alen = err;
929
930 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
931 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
932 goto out;
933
278a3de5 934restart:
e27dfcea 935 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1da177e4
LT
936 if (!other)
937 goto out;
938
278a3de5
DM
939 unix_state_double_lock(sk, other);
940
941 /* Apparently VFS overslept socket death. Retry. */
942 if (sock_flag(other, SOCK_DEAD)) {
943 unix_state_double_unlock(sk, other);
944 sock_put(other);
945 goto restart;
946 }
1da177e4
LT
947
948 err = -EPERM;
949 if (!unix_may_send(sk, other))
950 goto out_unlock;
951
952 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
953 if (err)
954 goto out_unlock;
955
956 } else {
957 /*
958 * 1003.1g breaking connected state with AF_UNSPEC
959 */
960 other = NULL;
278a3de5 961 unix_state_double_lock(sk, other);
1da177e4
LT
962 }
963
964 /*
965 * If it was connected, reconnect.
966 */
967 if (unix_peer(sk)) {
968 struct sock *old_peer = unix_peer(sk);
e27dfcea 969 unix_peer(sk) = other;
278a3de5 970 unix_state_double_unlock(sk, other);
1da177e4
LT
971
972 if (other != old_peer)
973 unix_dgram_disconnected(sk, old_peer);
974 sock_put(old_peer);
975 } else {
e27dfcea 976 unix_peer(sk) = other;
278a3de5 977 unix_state_double_unlock(sk, other);
1da177e4 978 }
ac7bfa62 979 return 0;
1da177e4
LT
980
981out_unlock:
278a3de5 982 unix_state_double_unlock(sk, other);
1da177e4
LT
983 sock_put(other);
984out:
985 return err;
986}
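As noted in the AF_UNSPEC branch above (the 1003.1g rule), a connected datagram socket can drop its peer association by connecting to an AF_UNSPEC address. A small userspace helper sketch, not part of this file:

#include <string.h>
#include <sys/socket.h>

/* Dissolve the peer association of a connected AF_UNIX datagram socket. */
static int unix_dgram_dissolve(int fd)
{
    struct sockaddr sa;

    memset(&sa, 0, sizeof(sa));
    sa.sa_family = AF_UNSPEC;
    return connect(fd, &sa, sizeof(sa));
}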
987
988static long unix_wait_for_peer(struct sock *other, long timeo)
989{
990 struct unix_sock *u = unix_sk(other);
991 int sched;
992 DEFINE_WAIT(wait);
993
994 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
995
996 sched = !sock_flag(other, SOCK_DEAD) &&
997 !(other->sk_shutdown & RCV_SHUTDOWN) &&
3c73419c 998 unix_recvq_full(other);
1da177e4 999
1c92b4e5 1000 unix_state_unlock(other);
1da177e4
LT
1001
1002 if (sched)
1003 timeo = schedule_timeout(timeo);
1004
1005 finish_wait(&u->peer_wait, &wait);
1006 return timeo;
1007}
1008
1009static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1010 int addr_len, int flags)
1011{
e27dfcea 1012 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1da177e4 1013 struct sock *sk = sock->sk;
3b1e0a65 1014 struct net *net = sock_net(sk);
1da177e4
LT
1015 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1016 struct sock *newsk = NULL;
1017 struct sock *other = NULL;
1018 struct sk_buff *skb = NULL;
1019 unsigned hash;
1020 int st;
1021 int err;
1022 long timeo;
1023
1024 err = unix_mkname(sunaddr, addr_len, &hash);
1025 if (err < 0)
1026 goto out;
1027 addr_len = err;
1028
1029 if (test_bit(SOCK_PASSCRED, &sock->flags)
1030 && !u->addr && (err = unix_autobind(sock)) != 0)
1031 goto out;
1032
1033 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1034
1035 /* First of all allocate resources.
1036 If we will make it after state is locked,
1037 we will have to recheck all again in any case.
1038 */
1039
1040 err = -ENOMEM;
1041
1042 /* create new sock for complete connection */
3b1e0a65 1043 newsk = unix_create1(sock_net(sk), NULL);
1da177e4
LT
1044 if (newsk == NULL)
1045 goto out;
1046
1047 /* Allocate skb for sending to listening sock */
1048 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1049 if (skb == NULL)
1050 goto out;
1051
1052restart:
1053 /* Find listening sock. */
097e66c5 1054 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1da177e4
LT
1055 if (!other)
1056 goto out;
1057
1058 /* Latch state of peer */
1c92b4e5 1059 unix_state_lock(other);
1da177e4
LT
1060
1061 /* Apparently VFS overslept socket death. Retry. */
1062 if (sock_flag(other, SOCK_DEAD)) {
1c92b4e5 1063 unix_state_unlock(other);
1da177e4
LT
1064 sock_put(other);
1065 goto restart;
1066 }
1067
1068 err = -ECONNREFUSED;
1069 if (other->sk_state != TCP_LISTEN)
1070 goto out_unlock;
1071
3c73419c 1072 if (unix_recvq_full(other)) {
1da177e4
LT
1073 err = -EAGAIN;
1074 if (!timeo)
1075 goto out_unlock;
1076
1077 timeo = unix_wait_for_peer(other, timeo);
1078
1079 err = sock_intr_errno(timeo);
1080 if (signal_pending(current))
1081 goto out;
1082 sock_put(other);
1083 goto restart;
ac7bfa62 1084 }
1da177e4
LT
1085
1086 /* Latch our state.
1087
1088 It is a tricky place. We need to grab the write lock and cannot
1089 drop the lock on the peer. It is dangerous because deadlock is
1090 possible. The connect-to-self case and simultaneous
1091 attempts to connect are eliminated by checking the socket
1092 state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1093 check this before attempting to grab the lock.
1094
1095 Well, and we have to recheck the state after the socket is locked.
1096 */
1097 st = sk->sk_state;
1098
1099 switch (st) {
1100 case TCP_CLOSE:
1101 /* This is ok... continue with connect */
1102 break;
1103 case TCP_ESTABLISHED:
1104 /* Socket is already connected */
1105 err = -EISCONN;
1106 goto out_unlock;
1107 default:
1108 err = -EINVAL;
1109 goto out_unlock;
1110 }
1111
1c92b4e5 1112 unix_state_lock_nested(sk);
1da177e4
LT
1113
1114 if (sk->sk_state != st) {
1c92b4e5
DM
1115 unix_state_unlock(sk);
1116 unix_state_unlock(other);
1da177e4
LT
1117 sock_put(other);
1118 goto restart;
1119 }
1120
1121 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1122 if (err) {
1c92b4e5 1123 unix_state_unlock(sk);
1da177e4
LT
1124 goto out_unlock;
1125 }
1126
1127 /* The way is open! Quickly set all the necessary fields... */
1128
1129 sock_hold(sk);
1130 unix_peer(newsk) = sk;
1131 newsk->sk_state = TCP_ESTABLISHED;
1132 newsk->sk_type = sk->sk_type;
b488893a 1133 newsk->sk_peercred.pid = task_tgid_vnr(current);
1da177e4
LT
1134 newsk->sk_peercred.uid = current->euid;
1135 newsk->sk_peercred.gid = current->egid;
1136 newu = unix_sk(newsk);
1137 newsk->sk_sleep = &newu->peer_wait;
1138 otheru = unix_sk(other);
1139
1140 /* copy address information from listening to new sock*/
1141 if (otheru->addr) {
1142 atomic_inc(&otheru->addr->refcnt);
1143 newu->addr = otheru->addr;
1144 }
1145 if (otheru->dentry) {
1146 newu->dentry = dget(otheru->dentry);
1147 newu->mnt = mntget(otheru->mnt);
1148 }
1149
1150 /* Set credentials */
1151 sk->sk_peercred = other->sk_peercred;
1152
1da177e4
LT
1153 sock->state = SS_CONNECTED;
1154 sk->sk_state = TCP_ESTABLISHED;
830a1e5c
BL
1155 sock_hold(newsk);
1156
1157 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1158 unix_peer(sk) = newsk;
1da177e4 1159
1c92b4e5 1160 unix_state_unlock(sk);
1da177e4
LT
1161
1162 /* queue the skb and send info to the listening sock */
1163 spin_lock(&other->sk_receive_queue.lock);
1164 __skb_queue_tail(&other->sk_receive_queue, skb);
1da177e4 1165 spin_unlock(&other->sk_receive_queue.lock);
1c92b4e5 1166 unix_state_unlock(other);
1da177e4
LT
1167 other->sk_data_ready(other, 0);
1168 sock_put(other);
1169 return 0;
1170
1171out_unlock:
1172 if (other)
1c92b4e5 1173 unix_state_unlock(other);
1da177e4
LT
1174
1175out:
1176 if (skb)
1177 kfree_skb(skb);
1178 if (newsk)
1179 unix_release_sock(newsk, 0);
1180 if (other)
1181 sock_put(other);
1182 return err;
1183}
1184
1185static int unix_socketpair(struct socket *socka, struct socket *sockb)
1186{
e27dfcea 1187 struct sock *ska = socka->sk, *skb = sockb->sk;
1da177e4
LT
1188
1189 /* Join our sockets back to back */
1190 sock_hold(ska);
1191 sock_hold(skb);
e27dfcea
JK
1192 unix_peer(ska) = skb;
1193 unix_peer(skb) = ska;
b488893a 1194 ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
1da177e4
LT
1195 ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1196 ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1197
1198 if (ska->sk_type != SOCK_DGRAM) {
1199 ska->sk_state = TCP_ESTABLISHED;
1200 skb->sk_state = TCP_ESTABLISHED;
1201 socka->state = SS_CONNECTED;
1202 sockb->state = SS_CONNECTED;
1203 }
1204 return 0;
1205}
1206
1207static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1208{
1209 struct sock *sk = sock->sk;
1210 struct sock *tsk;
1211 struct sk_buff *skb;
1212 int err;
1213
1214 err = -EOPNOTSUPP;
6eba6a37 1215 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1da177e4
LT
1216 goto out;
1217
1218 err = -EINVAL;
1219 if (sk->sk_state != TCP_LISTEN)
1220 goto out;
1221
1222 /* If socket state is TCP_LISTEN it cannot change (for now...),
1223 * so that no locks are necessary.
1224 */
1225
1226 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1227 if (!skb) {
1228 /* This means receive shutdown. */
1229 if (err == 0)
1230 err = -EINVAL;
1231 goto out;
1232 }
1233
1234 tsk = skb->sk;
1235 skb_free_datagram(sk, skb);
1236 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1237
1238 /* attach accepted sock to socket */
1c92b4e5 1239 unix_state_lock(tsk);
1da177e4
LT
1240 newsock->state = SS_CONNECTED;
1241 sock_graft(tsk, newsock);
1c92b4e5 1242 unix_state_unlock(tsk);
1da177e4
LT
1243 return 0;
1244
1245out:
1246 return err;
1247}
1248
1249
1250static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1251{
1252 struct sock *sk = sock->sk;
1253 struct unix_sock *u;
e27dfcea 1254 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1da177e4
LT
1255 int err = 0;
1256
1257 if (peer) {
1258 sk = unix_peer_get(sk);
1259
1260 err = -ENOTCONN;
1261 if (!sk)
1262 goto out;
1263 err = 0;
1264 } else {
1265 sock_hold(sk);
1266 }
1267
1268 u = unix_sk(sk);
1c92b4e5 1269 unix_state_lock(sk);
1da177e4
LT
1270 if (!u->addr) {
1271 sunaddr->sun_family = AF_UNIX;
1272 sunaddr->sun_path[0] = 0;
1273 *uaddr_len = sizeof(short);
1274 } else {
1275 struct unix_address *addr = u->addr;
1276
1277 *uaddr_len = addr->len;
1278 memcpy(sunaddr, addr->name, *uaddr_len);
1279 }
1c92b4e5 1280 unix_state_unlock(sk);
1da177e4
LT
1281 sock_put(sk);
1282out:
1283 return err;
1284}
1285
1286static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1287{
1288 int i;
1289
1290 scm->fp = UNIXCB(skb).fp;
1291 skb->destructor = sock_wfree;
1292 UNIXCB(skb).fp = NULL;
1293
6eba6a37 1294 for (i = scm->fp->count-1; i >= 0; i--)
1da177e4
LT
1295 unix_notinflight(scm->fp->fp[i]);
1296}
1297
1298static void unix_destruct_fds(struct sk_buff *skb)
1299{
1300 struct scm_cookie scm;
1301 memset(&scm, 0, sizeof(scm));
1302 unix_detach_fds(&scm, skb);
1303
1304 /* Alas, it calls VFS */
1305 /* So fscking what? fput() had been SMP-safe since the last Summer */
1306 scm_destroy(&scm);
1307 sock_wfree(skb);
1308}
1309
6209344f 1310static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1da177e4
LT
1311{
1312 int i;
6209344f
MS
1313
1314 /*
1315 * Need to duplicate file references for the sake of garbage
1316 * collection. Otherwise a socket in the fps might become a
1317 * candidate for GC while the skb is not yet queued.
1318 */
1319 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1320 if (!UNIXCB(skb).fp)
1321 return -ENOMEM;
1322
6eba6a37 1323 for (i = scm->fp->count-1; i >= 0; i--)
1da177e4 1324 unix_inflight(scm->fp->fp[i]);
1da177e4 1325 skb->destructor = unix_destruct_fds;
6209344f 1326 return 0;
1da177e4
LT
1327}
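The descriptors that unix_attach_fds()/unix_detach_fds() carry are handed in from userspace as SCM_RIGHTS control messages. A minimal send-side sketch (not part of this file; send_fd() is a hypothetical helper name, error handling omitted):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(int sock, int fd_to_pass)
{
    char dummy = 'x';
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {                               /* aligned cmsg buffer */
        char buf[CMSG_SPACE(sizeof(int))];
        struct cmsghdr align;
    } u;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

    return (int)sendmsg(sock, &msg, 0);
}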
1328
1329/*
1330 * Send AF_UNIX data.
1331 */
1332
1333static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1334 struct msghdr *msg, size_t len)
1335{
1336 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1337 struct sock *sk = sock->sk;
3b1e0a65 1338 struct net *net = sock_net(sk);
1da177e4 1339 struct unix_sock *u = unix_sk(sk);
e27dfcea 1340 struct sockaddr_un *sunaddr = msg->msg_name;
1da177e4
LT
1341 struct sock *other = NULL;
1342 int namelen = 0; /* fake GCC */
1343 int err;
1344 unsigned hash;
1345 struct sk_buff *skb;
1346 long timeo;
1347 struct scm_cookie tmp_scm;
1348
1349 if (NULL == siocb->scm)
1350 siocb->scm = &tmp_scm;
1351 err = scm_send(sock, msg, siocb->scm);
1352 if (err < 0)
1353 return err;
1354
1355 err = -EOPNOTSUPP;
1356 if (msg->msg_flags&MSG_OOB)
1357 goto out;
1358
1359 if (msg->msg_namelen) {
1360 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1361 if (err < 0)
1362 goto out;
1363 namelen = err;
1364 } else {
1365 sunaddr = NULL;
1366 err = -ENOTCONN;
1367 other = unix_peer_get(sk);
1368 if (!other)
1369 goto out;
1370 }
1371
1372 if (test_bit(SOCK_PASSCRED, &sock->flags)
1373 && !u->addr && (err = unix_autobind(sock)) != 0)
1374 goto out;
1375
1376 err = -EMSGSIZE;
1377 if (len > sk->sk_sndbuf - 32)
1378 goto out;
1379
1380 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
e27dfcea 1381 if (skb == NULL)
1da177e4
LT
1382 goto out;
1383
1384 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
6209344f
MS
1385 if (siocb->scm->fp) {
1386 err = unix_attach_fds(siocb->scm, skb);
1387 if (err)
1388 goto out_free;
1389 }
dc49c1f9 1390 unix_get_secdata(siocb->scm, skb);
877ce7c1 1391
badff6d0 1392 skb_reset_transport_header(skb);
6eba6a37 1393 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1da177e4
LT
1394 if (err)
1395 goto out_free;
1396
1397 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1398
1399restart:
1400 if (!other) {
1401 err = -ECONNRESET;
1402 if (sunaddr == NULL)
1403 goto out_free;
1404
097e66c5 1405 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1da177e4 1406 hash, &err);
e27dfcea 1407 if (other == NULL)
1da177e4
LT
1408 goto out_free;
1409 }
1410
1c92b4e5 1411 unix_state_lock(other);
1da177e4
LT
1412 err = -EPERM;
1413 if (!unix_may_send(sk, other))
1414 goto out_unlock;
1415
1416 if (sock_flag(other, SOCK_DEAD)) {
1417 /*
1418 * Check with 1003.1g - what should
1419 * datagram error
1420 */
1c92b4e5 1421 unix_state_unlock(other);
1da177e4
LT
1422 sock_put(other);
1423
1424 err = 0;
1c92b4e5 1425 unix_state_lock(sk);
1da177e4 1426 if (unix_peer(sk) == other) {
e27dfcea 1427 unix_peer(sk) = NULL;
1c92b4e5 1428 unix_state_unlock(sk);
1da177e4
LT
1429
1430 unix_dgram_disconnected(sk, other);
1431 sock_put(other);
1432 err = -ECONNREFUSED;
1433 } else {
1c92b4e5 1434 unix_state_unlock(sk);
1da177e4
LT
1435 }
1436
1437 other = NULL;
1438 if (err)
1439 goto out_free;
1440 goto restart;
1441 }
1442
1443 err = -EPIPE;
1444 if (other->sk_shutdown & RCV_SHUTDOWN)
1445 goto out_unlock;
1446
1447 if (sk->sk_type != SOCK_SEQPACKET) {
1448 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1449 if (err)
1450 goto out_unlock;
1451 }
1452
3c73419c 1453 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1da177e4
LT
1454 if (!timeo) {
1455 err = -EAGAIN;
1456 goto out_unlock;
1457 }
1458
1459 timeo = unix_wait_for_peer(other, timeo);
1460
1461 err = sock_intr_errno(timeo);
1462 if (signal_pending(current))
1463 goto out_free;
1464
1465 goto restart;
1466 }
1467
1468 skb_queue_tail(&other->sk_receive_queue, skb);
1c92b4e5 1469 unix_state_unlock(other);
1da177e4
LT
1470 other->sk_data_ready(other, len);
1471 sock_put(other);
1472 scm_destroy(siocb->scm);
1473 return len;
1474
1475out_unlock:
1c92b4e5 1476 unix_state_unlock(other);
1da177e4
LT
1477out_free:
1478 kfree_skb(skb);
1479out:
1480 if (other)
1481 sock_put(other);
1482 scm_destroy(siocb->scm);
1483 return err;
1484}
1485
ac7bfa62 1486
1da177e4
LT
1487static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1488 struct msghdr *msg, size_t len)
1489{
1490 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1491 struct sock *sk = sock->sk;
1492 struct sock *other = NULL;
e27dfcea 1493 struct sockaddr_un *sunaddr = msg->msg_name;
6eba6a37 1494 int err, size;
1da177e4 1495 struct sk_buff *skb;
e27dfcea 1496 int sent = 0;
1da177e4
LT
1497 struct scm_cookie tmp_scm;
1498
1499 if (NULL == siocb->scm)
1500 siocb->scm = &tmp_scm;
1501 err = scm_send(sock, msg, siocb->scm);
1502 if (err < 0)
1503 return err;
1504
1505 err = -EOPNOTSUPP;
1506 if (msg->msg_flags&MSG_OOB)
1507 goto out_err;
1508
1509 if (msg->msg_namelen) {
1510 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1511 goto out_err;
1512 } else {
1513 sunaddr = NULL;
1514 err = -ENOTCONN;
830a1e5c 1515 other = unix_peer(sk);
1da177e4
LT
1516 if (!other)
1517 goto out_err;
1518 }
1519
1520 if (sk->sk_shutdown & SEND_SHUTDOWN)
1521 goto pipe_err;
1522
6eba6a37 1523 while (sent < len) {
1da177e4 1524 /*
e9df7d7f
BL
1525 * Optimisation for the fact that under 0.01% of X
1526 * messages typically need breaking up.
1da177e4
LT
1527 */
1528
e9df7d7f 1529 size = len-sent;
1da177e4
LT
1530
1531 /* Keep two messages in the pipe so it schedules better */
e9df7d7f
BL
1532 if (size > ((sk->sk_sndbuf >> 1) - 64))
1533 size = (sk->sk_sndbuf >> 1) - 64;
1da177e4
LT
1534
1535 if (size > SKB_MAX_ALLOC)
1536 size = SKB_MAX_ALLOC;
ac7bfa62 1537
1da177e4
LT
1538 /*
1539 * Grab a buffer
1540 */
ac7bfa62 1541
6eba6a37
ED
1542 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1543 &err);
1da177e4 1544
e27dfcea 1545 if (skb == NULL)
1da177e4
LT
1546 goto out_err;
1547
1548 /*
1549 * If you pass two values to the sock_alloc_send_skb
1550 * it tries to grab the large buffer with GFP_NOFS
1551 * (which can fail easily), and if that fails it grabs the
1552 * fallback-size buffer, which is under a page and will
1553 * succeed. [Alan]
1554 */
1555 size = min_t(int, size, skb_tailroom(skb));
1556
1557 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
6209344f
MS
1558 if (siocb->scm->fp) {
1559 err = unix_attach_fds(siocb->scm, skb);
1560 if (err) {
1561 kfree_skb(skb);
1562 goto out_err;
1563 }
1564 }
1da177e4 1565
6eba6a37
ED
1566 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1567 if (err) {
1da177e4
LT
1568 kfree_skb(skb);
1569 goto out_err;
1570 }
1571
1c92b4e5 1572 unix_state_lock(other);
1da177e4
LT
1573
1574 if (sock_flag(other, SOCK_DEAD) ||
1575 (other->sk_shutdown & RCV_SHUTDOWN))
1576 goto pipe_err_free;
1577
1578 skb_queue_tail(&other->sk_receive_queue, skb);
1c92b4e5 1579 unix_state_unlock(other);
1da177e4 1580 other->sk_data_ready(other, size);
e27dfcea 1581 sent += size;
1da177e4 1582 }
1da177e4
LT
1583
1584 scm_destroy(siocb->scm);
1585 siocb->scm = NULL;
1586
1587 return sent;
1588
1589pipe_err_free:
1c92b4e5 1590 unix_state_unlock(other);
1da177e4
LT
1591 kfree_skb(skb);
1592pipe_err:
6eba6a37
ED
1593 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1594 send_sig(SIGPIPE, current, 0);
1da177e4
LT
1595 err = -EPIPE;
1596out_err:
1da177e4
LT
1597 scm_destroy(siocb->scm);
1598 siocb->scm = NULL;
1599 return sent ? : err;
1600}
1601
1602static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1603 struct msghdr *msg, size_t len)
1604{
1605 int err;
1606 struct sock *sk = sock->sk;
ac7bfa62 1607
1da177e4
LT
1608 err = sock_error(sk);
1609 if (err)
1610 return err;
1611
1612 if (sk->sk_state != TCP_ESTABLISHED)
1613 return -ENOTCONN;
1614
1615 if (msg->msg_namelen)
1616 msg->msg_namelen = 0;
1617
1618 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1619}
ac7bfa62 1620
1da177e4
LT
1621static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1622{
1623 struct unix_sock *u = unix_sk(sk);
1624
1625 msg->msg_namelen = 0;
1626 if (u->addr) {
1627 msg->msg_namelen = u->addr->len;
1628 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1629 }
1630}
1631
1632static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1633 struct msghdr *msg, size_t size,
1634 int flags)
1635{
1636 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1637 struct scm_cookie tmp_scm;
1638 struct sock *sk = sock->sk;
1639 struct unix_sock *u = unix_sk(sk);
1640 int noblock = flags & MSG_DONTWAIT;
1641 struct sk_buff *skb;
1642 int err;
1643
1644 err = -EOPNOTSUPP;
1645 if (flags&MSG_OOB)
1646 goto out;
1647
1648 msg->msg_namelen = 0;
1649
57b47a53 1650 mutex_lock(&u->readlock);
1da177e4
LT
1651
1652 skb = skb_recv_datagram(sk, flags, noblock, &err);
0a112258
FZ
1653 if (!skb) {
1654 unix_state_lock(sk);
1655 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1656 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1657 (sk->sk_shutdown & RCV_SHUTDOWN))
1658 err = 0;
1659 unix_state_unlock(sk);
1da177e4 1660 goto out_unlock;
0a112258 1661 }
1da177e4 1662
71e20f18 1663 wake_up_interruptible_sync(&u->peer_wait);
1da177e4
LT
1664
1665 if (msg->msg_name)
1666 unix_copy_addr(msg, skb->sk);
1667
1668 if (size > skb->len)
1669 size = skb->len;
1670 else if (size < skb->len)
1671 msg->msg_flags |= MSG_TRUNC;
1672
1673 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1674 if (err)
1675 goto out_free;
1676
1677 if (!siocb->scm) {
1678 siocb->scm = &tmp_scm;
1679 memset(&tmp_scm, 0, sizeof(tmp_scm));
1680 }
1681 siocb->scm->creds = *UNIXCREDS(skb);
877ce7c1 1682 unix_set_secdata(siocb->scm, skb);
1da177e4 1683
6eba6a37 1684 if (!(flags & MSG_PEEK)) {
1da177e4
LT
1685 if (UNIXCB(skb).fp)
1686 unix_detach_fds(siocb->scm, skb);
6eba6a37 1687 } else {
1da177e4
LT
1688 /* It is questionable: on PEEK we could:
1689 - do not return fds - good, but too simple 8)
1690 - return fds, and do not return them on read (old strategy,
1691 apparently wrong)
1692 - clone fds (I chose it for now, it is the most universal
1693 solution)
ac7bfa62
YH
1694
1695 POSIX 1003.1g does not actually define this clearly
1696 at all. POSIX 1003.1g doesn't define a lot of things
1697 clearly however!
1698
1da177e4
LT
1699 */
1700 if (UNIXCB(skb).fp)
1701 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1702 }
1703 err = size;
1704
1705 scm_recv(sock, msg, siocb->scm, flags);
1706
1707out_free:
6eba6a37 1708 skb_free_datagram(sk, skb);
1da177e4 1709out_unlock:
57b47a53 1710 mutex_unlock(&u->readlock);
1da177e4
LT
1711out:
1712 return err;
1713}
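The receive side matching the send_fd() sketch earlier pulls the descriptor back out of the SCM_RIGHTS control message (and, per the MSG_PEEK note above, a peeked receive gets its own duplicate of the descriptor). A sketch, not part of this file; error handling trimmed:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int recv_fd(int sock)
{
    char dummy;
    struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
    union {
        char buf[CMSG_SPACE(sizeof(int))];
        struct cmsghdr align;
    } u;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cmsg;
    int fd = -1;

    if (recvmsg(sock, &msg, 0) < 0)
        return -1;
    cmsg = CMSG_FIRSTHDR(&msg);
    if (cmsg && cmsg->cmsg_level == SOL_SOCKET &&
        cmsg->cmsg_type == SCM_RIGHTS)
        memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
    return fd;
}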
1714
1715/*
1716 * Sleep until data has arrived. But check for races.
1717 */
ac7bfa62 1718
6eba6a37 1719static long unix_stream_data_wait(struct sock *sk, long timeo)
1da177e4
LT
1720{
1721 DEFINE_WAIT(wait);
1722
1c92b4e5 1723 unix_state_lock(sk);
1da177e4
LT
1724
1725 for (;;) {
1726 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1727
b03efcfb 1728 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1da177e4
LT
1729 sk->sk_err ||
1730 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1731 signal_pending(current) ||
1732 !timeo)
1733 break;
1734
1735 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1c92b4e5 1736 unix_state_unlock(sk);
1da177e4 1737 timeo = schedule_timeout(timeo);
1c92b4e5 1738 unix_state_lock(sk);
1da177e4
LT
1739 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1740 }
1741
1742 finish_wait(sk->sk_sleep, &wait);
1c92b4e5 1743 unix_state_unlock(sk);
1da177e4
LT
1744 return timeo;
1745}
1746
1747
1748
1749static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1750 struct msghdr *msg, size_t size,
1751 int flags)
1752{
1753 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1754 struct scm_cookie tmp_scm;
1755 struct sock *sk = sock->sk;
1756 struct unix_sock *u = unix_sk(sk);
e27dfcea 1757 struct sockaddr_un *sunaddr = msg->msg_name;
1da177e4
LT
1758 int copied = 0;
1759 int check_creds = 0;
1760 int target;
1761 int err = 0;
1762 long timeo;
1763
1764 err = -EINVAL;
1765 if (sk->sk_state != TCP_ESTABLISHED)
1766 goto out;
1767
1768 err = -EOPNOTSUPP;
1769 if (flags&MSG_OOB)
1770 goto out;
1771
1772 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1773 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1774
1775 msg->msg_namelen = 0;
1776
1777 /* Lock the socket to prevent queue disordering
1778 * while we sleep in memcpy_toiovec
1779 */
1780
1781 if (!siocb->scm) {
1782 siocb->scm = &tmp_scm;
1783 memset(&tmp_scm, 0, sizeof(tmp_scm));
1784 }
1785
57b47a53 1786 mutex_lock(&u->readlock);
1da177e4 1787
6eba6a37 1788 do {
1da177e4
LT
1789 int chunk;
1790 struct sk_buff *skb;
1791
3c0d2f37 1792 unix_state_lock(sk);
1da177e4 1793 skb = skb_dequeue(&sk->sk_receive_queue);
6eba6a37 1794 if (skb == NULL) {
1da177e4 1795 if (copied >= target)
3c0d2f37 1796 goto unlock;
1da177e4
LT
1797
1798 /*
1799 * POSIX 1003.1g mandates this order.
1800 */
ac7bfa62 1801
6eba6a37
ED
1802 err = sock_error(sk);
1803 if (err)
3c0d2f37 1804 goto unlock;
1da177e4 1805 if (sk->sk_shutdown & RCV_SHUTDOWN)
3c0d2f37
MS
1806 goto unlock;
1807
1808 unix_state_unlock(sk);
1da177e4
LT
1809 err = -EAGAIN;
1810 if (!timeo)
1811 break;
57b47a53 1812 mutex_unlock(&u->readlock);
1da177e4
LT
1813
1814 timeo = unix_stream_data_wait(sk, timeo);
1815
1816 if (signal_pending(current)) {
1817 err = sock_intr_errno(timeo);
1818 goto out;
1819 }
57b47a53 1820 mutex_lock(&u->readlock);
1da177e4 1821 continue;
3c0d2f37
MS
1822 unlock:
1823 unix_state_unlock(sk);
1824 break;
1da177e4 1825 }
3c0d2f37 1826 unix_state_unlock(sk);
1da177e4
LT
1827
1828 if (check_creds) {
1829 /* Never glue messages from different writers */
6eba6a37
ED
1830 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
1831 sizeof(siocb->scm->creds)) != 0) {
1da177e4
LT
1832 skb_queue_head(&sk->sk_receive_queue, skb);
1833 break;
1834 }
1835 } else {
1836 /* Copy credentials */
1837 siocb->scm->creds = *UNIXCREDS(skb);
1838 check_creds = 1;
1839 }
1840
1841 /* Copy address just once */
6eba6a37 1842 if (sunaddr) {
1da177e4
LT
1843 unix_copy_addr(msg, skb->sk);
1844 sunaddr = NULL;
1845 }
1846
1847 chunk = min_t(unsigned int, skb->len, size);
1848 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1849 skb_queue_head(&sk->sk_receive_queue, skb);
1850 if (copied == 0)
1851 copied = -EFAULT;
1852 break;
1853 }
1854 copied += chunk;
1855 size -= chunk;
1856
1857 /* Mark read part of skb as used */
6eba6a37 1858 if (!(flags & MSG_PEEK)) {
1da177e4
LT
1859 skb_pull(skb, chunk);
1860
1861 if (UNIXCB(skb).fp)
1862 unix_detach_fds(siocb->scm, skb);
1863
1864 /* put the skb back if we didn't use it up.. */
6eba6a37 1865 if (skb->len) {
1da177e4
LT
1866 skb_queue_head(&sk->sk_receive_queue, skb);
1867 break;
1868 }
1869
1870 kfree_skb(skb);
1871
1872 if (siocb->scm->fp)
1873 break;
6eba6a37 1874 } else {
1da177e4
LT
1875 /* It is questionable, see note in unix_dgram_recvmsg.
1876 */
1877 if (UNIXCB(skb).fp)
1878 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1879
1880 /* put message back and return */
1881 skb_queue_head(&sk->sk_receive_queue, skb);
1882 break;
1883 }
1884 } while (size);
1885
57b47a53 1886 mutex_unlock(&u->readlock);
1da177e4
LT
1887 scm_recv(sock, msg, siocb->scm, flags);
1888out:
1889 return copied ? : err;
1890}
1891
1892static int unix_shutdown(struct socket *sock, int mode)
1893{
1894 struct sock *sk = sock->sk;
1895 struct sock *other;
1896
1897 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1898
1899 if (mode) {
1c92b4e5 1900 unix_state_lock(sk);
1da177e4 1901 sk->sk_shutdown |= mode;
e27dfcea 1902 other = unix_peer(sk);
1da177e4
LT
1903 if (other)
1904 sock_hold(other);
1c92b4e5 1905 unix_state_unlock(sk);
1da177e4
LT
1906 sk->sk_state_change(sk);
1907
1908 if (other &&
1909 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1910
1911 int peer_mode = 0;
1912
1913 if (mode&RCV_SHUTDOWN)
1914 peer_mode |= SEND_SHUTDOWN;
1915 if (mode&SEND_SHUTDOWN)
1916 peer_mode |= RCV_SHUTDOWN;
1c92b4e5 1917 unix_state_lock(other);
1da177e4 1918 other->sk_shutdown |= peer_mode;
1c92b4e5 1919 unix_state_unlock(other);
1da177e4
LT
1920 other->sk_state_change(other);
1921 read_lock(&other->sk_callback_lock);
1922 if (peer_mode == SHUTDOWN_MASK)
8d8ad9d7 1923 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1da177e4 1924 else if (peer_mode & RCV_SHUTDOWN)
8d8ad9d7 1925 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1da177e4
LT
1926 read_unlock(&other->sk_callback_lock);
1927 }
1928 if (other)
1929 sock_put(other);
1930 }
1931 return 0;
1932}
1933
1934static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1935{
1936 struct sock *sk = sock->sk;
e27dfcea 1937 long amount = 0;
1da177e4
LT
1938 int err;
1939
6eba6a37
ED
1940 switch (cmd) {
1941 case SIOCOUTQ:
1942 amount = atomic_read(&sk->sk_wmem_alloc);
1943 err = put_user(amount, (int __user *)arg);
1944 break;
1945 case SIOCINQ:
1da177e4
LT
1946 {
1947 struct sk_buff *skb;
1948
1949 if (sk->sk_state == TCP_LISTEN) {
1950 err = -EINVAL;
1951 break;
1952 }
1953
1954 spin_lock(&sk->sk_receive_queue.lock);
1955 if (sk->sk_type == SOCK_STREAM ||
1956 sk->sk_type == SOCK_SEQPACKET) {
1957 skb_queue_walk(&sk->sk_receive_queue, skb)
1958 amount += skb->len;
1959 } else {
1960 skb = skb_peek(&sk->sk_receive_queue);
1961 if (skb)
e27dfcea 1962 amount = skb->len;
1da177e4
LT
1963 }
1964 spin_unlock(&sk->sk_receive_queue.lock);
1965 err = put_user(amount, (int __user *)arg);
1966 break;
1967 }
1968
6eba6a37
ED
1969 default:
1970 err = -ENOIOCTLCMD;
1971 break;
1da177e4
LT
1972 }
1973 return err;
1974}
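
As the switch above shows, SIOCOUTQ reports bytes still charged to the send buffer, while SIOCINQ sums the whole receive queue for stream/seqpacket sockets but returns only the first datagram's length otherwise. A hedged userspace sketch (print_queue_sizes() is illustrative):

/* Hypothetical userspace sketch: query the queue sizes handled by
 * unix_ioctl().  SIOCINQ/SIOCOUTQ come from <linux/sockios.h>. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void print_queue_sizes(int fd)
{
    int inq = 0, outq = 0;

    if (ioctl(fd, SIOCINQ, &inq) == 0 &&
        ioctl(fd, SIOCOUTQ, &outq) == 0)
        printf("recv queue: %d bytes, send allocation: %d bytes\n",
               inq, outq);
}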
1975
6eba6a37 1976static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
1da177e4
LT
1977{
1978 struct sock *sk = sock->sk;
1979 unsigned int mask;
1980
1981 poll_wait(file, sk->sk_sleep, wait);
1982 mask = 0;
1983
1984 /* exceptional events? */
1985 if (sk->sk_err)
1986 mask |= POLLERR;
1987 if (sk->sk_shutdown == SHUTDOWN_MASK)
1988 mask |= POLLHUP;
f348d70a
DL
1989 if (sk->sk_shutdown & RCV_SHUTDOWN)
1990 mask |= POLLRDHUP;
1da177e4
LT
1991
1992 /* readable? */
1993 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1994 (sk->sk_shutdown & RCV_SHUTDOWN))
1995 mask |= POLLIN | POLLRDNORM;
1996
 1997 /* Connection-based sockets need to check for termination and startup */
6eba6a37
ED
1998 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
1999 sk->sk_state == TCP_CLOSE)
1da177e4
LT
2000 mask |= POLLHUP;
2001
2002 /*
2003 * we set writable also when the other side has shut down the
2004 * connection. This prevents stuck sockets.
2005 */
2006 if (unix_writable(sk))
2007 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2008
2009 return mask;
2010}
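
The mask built above translates socket state into poll events: POLLRDHUP when only the read side has been shut down, POLLHUP once both directions are gone, and POLLOUT is still reported after the peer shuts down so writers fail fast instead of getting stuck. A hedged userspace sketch (report_events() is illustrative):

/* Hypothetical userspace sketch: observe the bits set by unix_poll().
 * POLLRDHUP needs _GNU_SOURCE with glibc. */
#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>

static void report_events(int fd)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLRDHUP };

    if (poll(&pfd, 1, 0) <= 0)
        return;
    if (pfd.revents & POLLRDHUP)
        printf("peer shut down its write side\n");
    if (pfd.revents & POLLHUP)
        printf("both directions shut down\n");
    if (pfd.revents & POLLIN)
        printf("data (or EOF) available\n");
    if (pfd.revents & POLLOUT)
        printf("writable\n");
}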
2011
ec0d215f
RW
2012static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2013 poll_table *wait)
3c73419c 2014{
ec0d215f
RW
2015 struct sock *sk = sock->sk, *other;
2016 unsigned int mask, writable;
3c73419c
RW
2017
2018 poll_wait(file, sk->sk_sleep, wait);
3c73419c
RW
2019 mask = 0;
2020
2021 /* exceptional events? */
2022 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2023 mask |= POLLERR;
2024 if (sk->sk_shutdown & RCV_SHUTDOWN)
2025 mask |= POLLRDHUP;
2026 if (sk->sk_shutdown == SHUTDOWN_MASK)
2027 mask |= POLLHUP;
2028
2029 /* readable? */
2030 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2031 (sk->sk_shutdown & RCV_SHUTDOWN))
2032 mask |= POLLIN | POLLRDNORM;
2033
 2034 /* Connection-based sockets need to check for termination and startup */
2035 if (sk->sk_type == SOCK_SEQPACKET) {
2036 if (sk->sk_state == TCP_CLOSE)
2037 mask |= POLLHUP;
2038 /* connection hasn't started yet? */
2039 if (sk->sk_state == TCP_SYN_SENT)
2040 return mask;
2041 }
2042
2043 /* writable? */
ec0d215f
RW
2044 writable = unix_writable(sk);
2045 if (writable) {
2046 other = unix_peer_get(sk);
2047 if (other) {
2048 if (unix_peer(other) != sk) {
2049 poll_wait(file, &unix_sk(other)->peer_wait,
2050 wait);
2051 if (unix_recvq_full(other))
2052 writable = 0;
2053 }
2054
2055 sock_put(other);
2056 }
2057 }
2058
2059 if (writable)
3c73419c
RW
2060 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2061 else
2062 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2063
3c73419c
RW
2064 return mask;
2065}
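
For datagram sockets the poll routine above adds backpressure: when the socket itself is writable it also waits on the peer's peer_wait queue (if the peer is not connected back) and drops POLLOUT while unix_recvq_full() holds for that peer. A hedged userspace sketch of a sender relying on that wakeup (send_dgram_blocking() is illustrative):

/* Hypothetical userspace sketch: a non-blocking datagram sender that
 * hits a full receiver queue sleeps in poll() until unix_dgram_poll()
 * reports the socket writable again. */
#include <errno.h>
#include <poll.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t send_dgram_blocking(int fd, const void *buf, size_t len)
{
    struct pollfd pfd = { .fd = fd, .events = POLLOUT };

    for (;;) {
        ssize_t n = send(fd, buf, len, MSG_DONTWAIT);

        if (n >= 0 || errno != EAGAIN)
            return n;
        /* Receiver queue full: wait for room. */
        poll(&pfd, 1, -1);
    }
}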
1da177e4
LT
2066
2067#ifdef CONFIG_PROC_FS
a53eb3fe
PE
2068static struct sock *first_unix_socket(int *i)
2069{
2070 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2071 if (!hlist_empty(&unix_socket_table[*i]))
2072 return __sk_head(&unix_socket_table[*i]);
2073 }
2074 return NULL;
2075}
2076
2077static struct sock *next_unix_socket(int *i, struct sock *s)
2078{
2079 struct sock *next = sk_next(s);
2080 /* More in this chain? */
2081 if (next)
2082 return next;
2083 /* Look for next non-empty chain. */
2084 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2085 if (!hlist_empty(&unix_socket_table[*i]))
2086 return __sk_head(&unix_socket_table[*i]);
2087 }
2088 return NULL;
2089}
2090
097e66c5 2091struct unix_iter_state {
e372c414 2092 struct seq_net_private p;
097e66c5
DL
2093 int i;
2094};
e27dfcea 2095
1218854a 2096static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
1da177e4 2097{
1218854a 2098 struct unix_iter_state *iter = seq->private;
1da177e4
LT
2099 loff_t off = 0;
2100 struct sock *s;
2101
097e66c5 2102 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
1218854a 2103 if (sock_net(s) != seq_file_net(seq))
097e66c5 2104 continue;
ac7bfa62 2105 if (off == pos)
1da177e4
LT
2106 return s;
2107 ++off;
2108 }
2109 return NULL;
2110}
2111
1da177e4 2112static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
9a429c49 2113 __acquires(unix_table_lock)
1da177e4 2114{
fbe9cc4a 2115 spin_lock(&unix_table_lock);
b9f3124f 2116 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1da177e4
LT
2117}
2118
2119static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2120{
097e66c5
DL
2121 struct unix_iter_state *iter = seq->private;
2122 struct sock *sk = v;
1da177e4
LT
2123 ++*pos;
2124
b9f3124f 2125 if (v == SEQ_START_TOKEN)
097e66c5
DL
2126 sk = first_unix_socket(&iter->i);
2127 else
2128 sk = next_unix_socket(&iter->i, sk);
1218854a 2129 while (sk && (sock_net(sk) != seq_file_net(seq)))
097e66c5
DL
2130 sk = next_unix_socket(&iter->i, sk);
2131 return sk;
1da177e4
LT
2132}
2133
2134static void unix_seq_stop(struct seq_file *seq, void *v)
9a429c49 2135 __releases(unix_table_lock)
1da177e4 2136{
fbe9cc4a 2137 spin_unlock(&unix_table_lock);
1da177e4
LT
2138}
2139
2140static int unix_seq_show(struct seq_file *seq, void *v)
2141{
ac7bfa62 2142
b9f3124f 2143 if (v == SEQ_START_TOKEN)
1da177e4
LT
2144 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2145 "Inode Path\n");
2146 else {
2147 struct sock *s = v;
2148 struct unix_sock *u = unix_sk(s);
1c92b4e5 2149 unix_state_lock(s);
1da177e4
LT
2150
2151 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2152 s,
2153 atomic_read(&s->sk_refcnt),
2154 0,
2155 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2156 s->sk_type,
2157 s->sk_socket ?
2158 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2159 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2160 sock_i_ino(s));
2161
2162 if (u->addr) {
2163 int i, len;
2164 seq_putc(seq, ' ');
2165
2166 i = 0;
2167 len = u->addr->len - sizeof(short);
2168 if (!UNIX_ABSTRACT(s))
2169 len--;
2170 else {
2171 seq_putc(seq, '@');
2172 i++;
2173 }
2174 for ( ; i < len; i++)
2175 seq_putc(seq, u->addr->name->sun_path[i]);
2176 }
1c92b4e5 2177 unix_state_unlock(s);
1da177e4
LT
2178 seq_putc(seq, '\n');
2179 }
2180
2181 return 0;
2182}
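
Each row emitted above follows the header "Num RefCount Protocol Flags Type St Inode Path", with abstract addresses printed with a leading '@'. A hedged userspace sketch that parses the resulting /proc/net/unix (dump_unix_sockets() is illustrative):

/* Hypothetical userspace sketch: read back the table produced by
 * unix_seq_show() and print inode plus bound path, if any. */
#include <stdio.h>

static void dump_unix_sockets(void)
{
    FILE *f = fopen("/proc/net/unix", "r");
    char line[512], path[256];
    unsigned long inode;

    if (!f)
        return;
    while (fgets(line, sizeof(line), f)) {
        /* Num RefCount Protocol Flags Type St Inode Path */
        path[0] = '\0';
        if (sscanf(line, "%*x: %*x %*x %*x %*x %*x %lu %255s",
                   &inode, path) >= 1)
            printf("inode %lu path '%s'\n", inode, path);
    }
    fclose(f);
}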
2183
56b3d975 2184static const struct seq_operations unix_seq_ops = {
1da177e4
LT
2185 .start = unix_seq_start,
2186 .next = unix_seq_next,
2187 .stop = unix_seq_stop,
2188 .show = unix_seq_show,
2189};
2190
1da177e4
LT
2191static int unix_seq_open(struct inode *inode, struct file *file)
2192{
e372c414
DL
2193 return seq_open_net(inode, file, &unix_seq_ops,
2194 sizeof(struct unix_iter_state));
1da177e4
LT
2195}
2196
da7071d7 2197static const struct file_operations unix_seq_fops = {
1da177e4
LT
2198 .owner = THIS_MODULE,
2199 .open = unix_seq_open,
2200 .read = seq_read,
2201 .llseek = seq_lseek,
e372c414 2202 .release = seq_release_net,
1da177e4
LT
2203};
2204
2205#endif
2206
2207static struct net_proto_family unix_family_ops = {
2208 .family = PF_UNIX,
2209 .create = unix_create,
2210 .owner = THIS_MODULE,
2211};
2212
097e66c5
DL
2213
2214static int unix_net_init(struct net *net)
2215{
2216 int error = -ENOMEM;
2217
a0a53c8b 2218 net->unx.sysctl_max_dgram_qlen = 10;
1597fbc0
PE
2219 if (unix_sysctl_register(net))
2220 goto out;
d392e497 2221
097e66c5 2222#ifdef CONFIG_PROC_FS
1597fbc0
PE
2223 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2224 unix_sysctl_unregister(net);
097e66c5 2225 goto out;
1597fbc0 2226 }
097e66c5
DL
2227#endif
2228 error = 0;
2229out:
48dcc33e 2230 return error;
097e66c5
DL
2231}
2232
2233static void unix_net_exit(struct net *net)
2234{
1597fbc0 2235 unix_sysctl_unregister(net);
097e66c5
DL
2236 proc_net_remove(net, "unix");
2237}
2238
2239static struct pernet_operations unix_net_ops = {
2240 .init = unix_net_init,
2241 .exit = unix_net_exit,
2242};
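
unix_net_init() gives each network namespace its own sysctl table and /proc/net/unix entry, with the datagram backlog limit defaulting to 10. A hedged userspace sketch reading that limit back (read_max_dgram_qlen() is illustrative; the sysctl is exposed as /proc/sys/net/unix/max_dgram_qlen):

/* Hypothetical userspace sketch: read the per-namespace default set
 * by unix_net_init() (sysctl net.unix.max_dgram_qlen). */
#include <stdio.h>

static long read_max_dgram_qlen(void)
{
    FILE *f = fopen("/proc/sys/net/unix/max_dgram_qlen", "r");
    long qlen = -1;

    if (f) {
        if (fscanf(f, "%ld", &qlen) != 1)
            qlen = -1;
        fclose(f);
    }
    return qlen;
}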
2243
1da177e4
LT
2244static int __init af_unix_init(void)
2245{
2246 int rc = -1;
2247 struct sk_buff *dummy_skb;
2248
ef047f5e 2249 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
1da177e4
LT
2250
2251 rc = proto_register(&unix_proto, 1);
ac7bfa62
YH
2252 if (rc != 0) {
2253 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
0dc47877 2254 __func__);
1da177e4
LT
2255 goto out;
2256 }
2257
2258 sock_register(&unix_family_ops);
097e66c5 2259 register_pernet_subsys(&unix_net_ops);
1da177e4
LT
2260out:
2261 return rc;
2262}
2263
2264static void __exit af_unix_exit(void)
2265{
2266 sock_unregister(PF_UNIX);
1da177e4 2267 proto_unregister(&unix_proto);
097e66c5 2268 unregister_pernet_subsys(&unix_net_ops);
1da177e4
LT
2269}
2270
3d366960
DW
2271/* Earlier than device_initcall() so that other drivers invoking
2272 request_module() don't end up in a loop when modprobe tries
2273 to use a UNIX socket. But later than subsys_initcall() because
2274 we depend on stuff initialised there */
2275fs_initcall(af_unix_init);
1da177e4
LT
2276module_exit(af_unix_exit);
2277
2278MODULE_LICENSE("GPL");
2279MODULE_ALIAS_NETPROTO(PF_UNIX);
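
MODULE_ALIAS_NETPROTO(PF_UNIX) is what lets the socket core demand-load this code when it is built as a module: the first socket(AF_UNIX, ...) call triggers a request_module() for the protocol-family alias, which resolves to af_unix. A minimal hedged userspace trigger (touch_af_unix() is illustrative):

/* Hypothetical userspace sketch: simply creating an AF_UNIX socket is
 * enough to pull the module in via the alias declared above. */
#include <sys/socket.h>
#include <unistd.h>

static int touch_af_unix(void)
{
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);

    if (fd >= 0)
        close(fd);
    return fd >= 0 ? 0 : -1;
}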