/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing.
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector.
 *		Heiko EiBfeldt	:	Missing verify_area check.
 *		Alan Cox	:	Started POSIXisms.
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting.
 *		Kirk Petersen	:	Made this a module.
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it will avoid the huge amount
 *					of socks hashed (this for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina	:	Hash function optimizations.
 *	     Alexey Kuznetsov	:	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie	:	Set peercred for socketpair.
 *	     Michal Ostrowski	:	Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *					the core infrastructure is doing that
 *					for all net proto families now (2.5.69+).
 *
 * Known differences from reference BSD that was tested:
 *
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as the high water
 *		mark and a fake inode identifier (nor the BSD first-socket
 *		fstat-twice bug).
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns a 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??).
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  started by 0, so that this name space does not intersect
 *		  with BSD names.
 */
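
/*
 * Illustrative userspace sketch (not part of this file): binding the same
 * socket either to a filesystem path or to an abstract name.  The only
 * difference on the API side is the leading NUL byte and the address length
 * passed to bind(); the path below is a hypothetical example.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	// filesystem namespace: NUL-terminated path, visible via the VFS
 *	strcpy(a.sun_path, "/tmp/example.sock");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	// abstract namespace: sun_path[0] == '\0', name is length-delimited
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */
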
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;

static struct hlist_head *unix_sockets_unbound(void *addr)
{
        unsigned long hash = (unsigned long)addr;

        hash ^= hash >> 16;
        hash ^= hash >> 8;
        hash %= UNIX_HASH_SIZE;
        return &unix_socket_table[UNIX_HASH_SIZE + hash];
}

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
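
/*
 * Layout note (illustrative, derived from the code in this file): the lower
 * UNIX_HASH_SIZE buckets hold bound sockets - abstract names hashed by name,
 * filesystem names hashed by inode number - while the upper UNIX_HASH_SIZE
 * buckets hold unbound sockets, hashed by the socket address itself in
 * unix_sockets_unbound().  A filesystem binding records UNIX_HASH_SIZE in
 * addr->hash, so UNIX_ABSTRACT() only has to test addr->hash < UNIX_HASH_SIZE.
 */
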
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
        scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
        return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
        return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate spin lock.
 */

static inline unsigned int unix_hash_fold(__wsum n)
{
        unsigned int hash = (__force unsigned int)csum_fold(n);

        hash ^= hash >> 8;
        return hash & (UNIX_HASH_SIZE - 1);
}
#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(struct sock const *sk)
{
        return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

struct sock *unix_peer_get(struct sock *s)
{
        struct sock *peer;

        unix_state_lock(s);
        peer = unix_peer(s);
        if (peer)
                sock_hold(peer);
        unix_state_unlock(s);

        return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
        if (atomic_dec_and_test(&addr->refcnt))
                kfree(addr);
}

/*
 *	Check unix socket name:
 *		- should not be zero length.
 *		- if it does not start with a zero byte, it should be
 *		  NUL terminated (FS object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
        if (len <= sizeof(short) || len > sizeof(*sunaddr))
                return -EINVAL;
        if (!sunaddr || sunaddr->sun_family != AF_UNIX)
                return -EINVAL;
        if (sunaddr->sun_path[0]) {
                /*
                 * This may look like an off by one error but it is a bit more
                 * subtle. 108 is the longest valid AF_UNIX path for a binding.
                 * sun_path[108] doesn't as such exist.  However in kernel space
                 * we are guaranteed that it is a valid memory location in our
                 * kernel address buffer.
                 */
                ((char *)sunaddr)[len] = 0;
                len = strlen(sunaddr->sun_path) + 1 + sizeof(short);
                return len;
        }

        *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
        return len;
}
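
/*
 * Worked example (illustrative only): for a caller binding to the filesystem
 * path "/tmp/x", unix_mkname() NUL-terminates the copy and recomputes len as
 * strlen("/tmp/x") + 1 + sizeof(short) = 6 + 1 + 2 = 9; no hash is produced
 * here because the socket will later be hashed by inode number.  For an
 * abstract name such as "\0x" passed with len = sizeof(short) + 2, the length
 * is kept as supplied and *hashp is the checksum fold of the whole
 * (family + name) blob, which lands in [0, UNIX_HASH_SIZE).
 */
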
static void __unix_remove_socket(struct sock *sk)
{
        sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        WARN_ON(!sk_unhashed(sk));
        sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_remove_socket(sk);
        spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
        spin_lock(&unix_table_lock);
        __unix_insert_socket(list, sk);
        spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
                                              struct sockaddr_un *sunname,
                                              int len, int type, unsigned int hash)
{
        struct sock *s;

        sk_for_each(s, &unix_socket_table[hash ^ type]) {
                struct unix_sock *u = unix_sk(s);

                if (!net_eq(sock_net(s), net))
                        continue;

                if (u->addr->len == len &&
                    !memcmp(u->addr->name, sunname, len))
                        goto found;
        }
        s = NULL;
found:
        return s;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
                                                   struct sockaddr_un *sunname,
                                                   int len, int type,
                                                   unsigned int hash)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        s = __unix_find_socket_byname(net, sunname, len, type, hash);
        if (s)
                sock_hold(s);
        spin_unlock(&unix_table_lock);
        return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
        struct sock *s;

        spin_lock(&unix_table_lock);
        sk_for_each(s,
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;

                if (dentry && d_backing_inode(dentry) == i) {
                        sock_hold(s);
                        goto found;
                }
        }
        s = NULL;
found:
        spin_unlock(&unix_table_lock);
        return s;
}
static int unix_writable(const struct sock *sk)
{
        return sk->sk_state != TCP_LISTEN &&
               (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        if (unix_writable(sk)) {
                wq = rcu_dereference(sk->sk_wq);
                if (wq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
                                POLLOUT | POLLWRNORM | POLLWRBAND);
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets arrived from the previous peer. First, this allows flow
 * control to be based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
        if (!skb_queue_empty(&sk->sk_receive_queue)) {
                skb_queue_purge(&sk->sk_receive_queue);
                wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

                /* If one link of a bidirectional dgram pipe is disconnected,
                 * we signal an error. Messages are lost. Do not do this
                 * when the peer was not connected to us.
                 */
                if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
                        other->sk_err = ECONNRESET;
                        other->sk_error_report(other);
                }
        }
}
static void unix_sock_destructor(struct sock *sk)
{
        struct unix_sock *u = unix_sk(sk);

        skb_queue_purge(&sk->sk_receive_queue);

        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(!sk_unhashed(sk));
        WARN_ON(sk->sk_socket);
        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_info("Attempt to release alive unix socket: %p\n", sk);
                return;
        }

        if (u->addr)
                unix_release_addr(u->addr);

        atomic_long_dec(&unix_nr_socks);
        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
        pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
                atomic_long_read(&unix_nr_socks));
#endif
}
static void unix_release_sock(struct sock *sk, int embrion)
{
        struct unix_sock *u = unix_sk(sk);
        struct path path;
        struct sock *skpair;
        struct sk_buff *skb;
        int state;

        unix_remove_socket(sk);

        /* Clear state */
        unix_state_lock(sk);
        sock_orphan(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;
        path = u->path;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        state = sk->sk_state;
        sk->sk_state = TCP_CLOSE;
        unix_state_unlock(sk);

        wake_up_interruptible_all(&u->peer_wait);

        skpair = unix_peer(sk);

        if (skpair != NULL) {
                if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
                        unix_state_lock(skpair);
                        /* No more writes */
                        skpair->sk_shutdown = SHUTDOWN_MASK;
                        if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
                                skpair->sk_err = ECONNRESET;
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
                        sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
                }
                sock_put(skpair); /* It may now die */
                unix_peer(sk) = NULL;
        }

        /* Try to flush out this socket. Throw out buffers at least */

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                if (state == TCP_LISTEN)
                        unix_release_sock(skb->sk, 1);
                /* passed fds are erased in the kfree_skb hook */
                UNIXCB(skb).consumed = skb->len;
                kfree_skb(skb);
        }

        if (path.dentry)
                path_put(&path);

        sock_put(sk);

        /* ---- Socket is dead now and most probably destroyed ---- */

        /*
         * Fixme: BSD difference: In BSD all sockets connected to us get
         *        ECONNRESET and we die on the spot. In Linux we behave
         *        like files and pipes do and wait for the last
         *        dereference.
         *
         * Can't we simply set sock->err?
         *
         *        What does the above comment talk about? --ANK(980817)
         */

        if (unix_tot_inflight)
                unix_gc();              /* Garbage collect fds */
}
static void init_peercred(struct sock *sk)
{
        put_pid(sk->sk_peer_pid);
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        sk->sk_peer_pid  = get_pid(task_tgid(current));
        sk->sk_peer_cred = get_current_cred();
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
        put_pid(sk->sk_peer_pid);
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
        sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
        int err;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
        struct pid *old_pid = NULL;

        err = -EOPNOTSUPP;
        if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
                goto out;       /* Only stream/seqpacket sockets accept */
        err = -EINVAL;
        if (!u->addr)
                goto out;       /* No listens on an unbound socket */
        unix_state_lock(sk);
        if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
                goto out_unlock;
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog  = backlog;
        sk->sk_state            = TCP_LISTEN;
        /* set credentials so connect can copy them */
        init_peercred(sk);
        err = 0;

out_unlock:
        unix_state_unlock(sk);
        put_pid(old_pid);
out:
        return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
                               int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
                                    poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
                                    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
                                       struct pipe_inode_info *, size_t size,
                                       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
                                  int);
static int unix_set_peek_off(struct sock *sk, int val)
{
        struct unix_sock *u = unix_sk(sk);

        if (mutex_lock_interruptible(&u->readlock))
                return -EINTR;

        sk->sk_peek_off = val;
        mutex_unlock(&u->readlock);

        return 0;
}
static const struct proto_ops unix_stream_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_stream_connect,
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
        .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_stream_sendmsg,
        .recvmsg =      unix_stream_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     unix_stream_sendpage,
        .splice_read =  unix_stream_splice_read,
        .set_peek_off = unix_set_peek_off,
};

static const struct proto_ops unix_dgram_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_dgram_connect,
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
        .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_dgram_sendmsg,
        .recvmsg =      unix_dgram_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static const struct proto_ops unix_seqpacket_ops = {
        .family =       PF_UNIX,
        .owner =        THIS_MODULE,
        .release =      unix_release,
        .bind =         unix_bind,
        .connect =      unix_stream_connect,
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
        .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_seqpacket_sendmsg,
        .recvmsg =      unix_seqpacket_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
        .set_peek_off = unix_set_peek_off,
};

static struct proto unix_proto = {
        .name =         "UNIX",
        .owner =        THIS_MODULE,
        .obj_size =     sizeof(struct unix_sock),
};
/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
        struct sock *sk = NULL;
        struct unix_sock *u;

        atomic_long_inc(&unix_nr_socks);
        if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
                goto out;

        sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
        if (!sk)
                goto out;

        sock_init_data(sock, sk);
        lockdep_set_class(&sk->sk_receive_queue.lock,
                          &af_unix_sk_receive_queue_lock_key);

        sk->sk_write_space      = unix_write_space;
        sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
        sk->sk_destruct         = unix_sock_destructor;
        u = unix_sk(sk);
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        spin_lock_init(&u->lock);
        atomic_long_set(&u->inflight, 0);
        INIT_LIST_HEAD(&u->link);
        mutex_init(&u->readlock); /* single task reading lock */
        init_waitqueue_head(&u->peer_wait);
        unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
        if (sk == NULL)
                atomic_long_dec(&unix_nr_socks);
        else {
                local_bh_disable();
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
                local_bh_enable();
        }
        return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
                       int kern)
{
        if (protocol && protocol != PF_UNIX)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        switch (sock->type) {
        case SOCK_STREAM:
                sock->ops = &unix_stream_ops;
                break;
                /*
                 *      Believe it or not BSD has AF_UNIX, SOCK_RAW though
                 *      nothing uses it.
                 */
        case SOCK_RAW:
                sock->type = SOCK_DGRAM;
        case SOCK_DGRAM:
                sock->ops = &unix_dgram_ops;
                break;
        case SOCK_SEQPACKET:
                sock->ops = &unix_seqpacket_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
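
/*
 * Illustrative (userspace view, derived from the switch above, not part of
 * the original comments): socket(AF_UNIX, SOCK_RAW, 0) quietly succeeds and
 * behaves exactly like socket(AF_UNIX, SOCK_DGRAM, 0), mirroring the BSD
 * quirk mentioned above, while an unhandled type such as SOCK_RDM fails with
 * -ESOCKTNOSUPPORT and a non-zero, non-PF_UNIX protocol argument fails with
 * -EPROTONOSUPPORT.
 */
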
static int unix_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        unix_release_sock(sk, 0);
        sock->sk = NULL;

        return 0;
}
static int unix_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct unix_sock *u = unix_sk(sk);
        static u32 ordernum = 1;
        struct unix_address *addr;
        int err;
        unsigned int retries = 0;

        err = mutex_lock_interruptible(&u->readlock);
        if (err)
                return err;

        err = 0;
        if (u->addr)
                goto out;

        err = -ENOMEM;
        addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
        if (!addr)
                goto out;

        addr->name->sun_family = AF_UNIX;
        atomic_set(&addr->refcnt, 1);

retry:
        addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
        addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));

        spin_lock(&unix_table_lock);
        ordernum = (ordernum+1)&0xFFFFF;

        if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
                                      addr->hash)) {
                spin_unlock(&unix_table_lock);
                /*
                 * __unix_find_socket_byname() may take a long time if many
                 * names are already in use.
                 */
                cond_resched();
                /* Give up if all names seem to be in use. */
                if (retries++ == 0xFFFFF) {
                        err = -ENOSPC;
                        kfree(addr);
                        goto out;
                }
                goto retry;
        }
        addr->hash ^= sk->sk_type;

        __unix_remove_socket(sk);
        u->addr = addr;
        __unix_insert_socket(&unix_socket_table[addr->hash], sk);
        spin_unlock(&unix_table_lock);
        err = 0;

out:    mutex_unlock(&u->readlock);
        return err;
}
786 static struct sock
*unix_find_other(struct net
*net
,
787 struct sockaddr_un
*sunname
, int len
,
788 int type
, unsigned int hash
, int *error
)
794 if (sunname
->sun_path
[0]) {
796 err
= kern_path(sunname
->sun_path
, LOOKUP_FOLLOW
, &path
);
799 inode
= d_backing_inode(path
.dentry
);
800 err
= inode_permission(inode
, MAY_WRITE
);
805 if (!S_ISSOCK(inode
->i_mode
))
807 u
= unix_find_socket_byinode(inode
);
811 if (u
->sk_type
== type
)
817 if (u
->sk_type
!= type
) {
823 u
= unix_find_socket_byname(net
, sunname
, len
, type
, hash
);
825 struct dentry
*dentry
;
826 dentry
= unix_sk(u
)->path
.dentry
;
828 touch_atime(&unix_sk(u
)->path
);
841 static int unix_mknod(const char *sun_path
, umode_t mode
, struct path
*res
)
843 struct dentry
*dentry
;
847 * Get the parent directory, calculate the hash for last
850 dentry
= kern_path_create(AT_FDCWD
, sun_path
, &path
, 0);
851 err
= PTR_ERR(dentry
);
856 * All right, let's create it.
858 err
= security_path_mknod(&path
, dentry
, mode
, 0);
860 err
= vfs_mknod(d_inode(path
.dentry
), dentry
, mode
, 0);
862 res
->mnt
= mntget(path
.mnt
);
863 res
->dentry
= dget(dentry
);
866 done_path_create(&path
, dentry
);
870 static int unix_bind(struct socket
*sock
, struct sockaddr
*uaddr
, int addr_len
)
872 struct sock
*sk
= sock
->sk
;
873 struct net
*net
= sock_net(sk
);
874 struct unix_sock
*u
= unix_sk(sk
);
875 struct sockaddr_un
*sunaddr
= (struct sockaddr_un
*)uaddr
;
876 char *sun_path
= sunaddr
->sun_path
;
879 struct unix_address
*addr
;
880 struct hlist_head
*list
;
883 if (sunaddr
->sun_family
!= AF_UNIX
)
886 if (addr_len
== sizeof(short)) {
887 err
= unix_autobind(sock
);
891 err
= unix_mkname(sunaddr
, addr_len
, &hash
);
896 err
= mutex_lock_interruptible(&u
->readlock
);
905 addr
= kmalloc(sizeof(*addr
)+addr_len
, GFP_KERNEL
);
909 memcpy(addr
->name
, sunaddr
, addr_len
);
910 addr
->len
= addr_len
;
911 addr
->hash
= hash
^ sk
->sk_type
;
912 atomic_set(&addr
->refcnt
, 1);
916 umode_t mode
= S_IFSOCK
|
917 (SOCK_INODE(sock
)->i_mode
& ~current_umask());
918 err
= unix_mknod(sun_path
, mode
, &path
);
922 unix_release_addr(addr
);
925 addr
->hash
= UNIX_HASH_SIZE
;
926 hash
= d_backing_inode(path
.dentry
)->i_ino
& (UNIX_HASH_SIZE
-1);
927 spin_lock(&unix_table_lock
);
929 list
= &unix_socket_table
[hash
];
931 spin_lock(&unix_table_lock
);
933 if (__unix_find_socket_byname(net
, sunaddr
, addr_len
,
934 sk
->sk_type
, hash
)) {
935 unix_release_addr(addr
);
939 list
= &unix_socket_table
[addr
->hash
];
943 __unix_remove_socket(sk
);
945 __unix_insert_socket(list
, sk
);
948 spin_unlock(&unix_table_lock
);
950 mutex_unlock(&u
->readlock
);
955 static void unix_state_double_lock(struct sock
*sk1
, struct sock
*sk2
)
957 if (unlikely(sk1
== sk2
) || !sk2
) {
958 unix_state_lock(sk1
);
962 unix_state_lock(sk1
);
963 unix_state_lock_nested(sk2
);
965 unix_state_lock(sk2
);
966 unix_state_lock_nested(sk1
);
970 static void unix_state_double_unlock(struct sock
*sk1
, struct sock
*sk2
)
972 if (unlikely(sk1
== sk2
) || !sk2
) {
973 unix_state_unlock(sk1
);
976 unix_state_unlock(sk1
);
977 unix_state_unlock(sk2
);
980 static int unix_dgram_connect(struct socket
*sock
, struct sockaddr
*addr
,
983 struct sock
*sk
= sock
->sk
;
984 struct net
*net
= sock_net(sk
);
985 struct sockaddr_un
*sunaddr
= (struct sockaddr_un
*)addr
;
990 if (addr
->sa_family
!= AF_UNSPEC
) {
991 err
= unix_mkname(sunaddr
, alen
, &hash
);
996 if (test_bit(SOCK_PASSCRED
, &sock
->flags
) &&
997 !unix_sk(sk
)->addr
&& (err
= unix_autobind(sock
)) != 0)
1001 other
= unix_find_other(net
, sunaddr
, alen
, sock
->type
, hash
, &err
);
1005 unix_state_double_lock(sk
, other
);
1007 /* Apparently VFS overslept socket death. Retry. */
1008 if (sock_flag(other
, SOCK_DEAD
)) {
1009 unix_state_double_unlock(sk
, other
);
1015 if (!unix_may_send(sk
, other
))
1018 err
= security_unix_may_send(sk
->sk_socket
, other
->sk_socket
);
1024 * 1003.1g breaking connected state with AF_UNSPEC
1027 unix_state_double_lock(sk
, other
);
1031 * If it was connected, reconnect.
1033 if (unix_peer(sk
)) {
1034 struct sock
*old_peer
= unix_peer(sk
);
1035 unix_peer(sk
) = other
;
1036 unix_state_double_unlock(sk
, other
);
1038 if (other
!= old_peer
)
1039 unix_dgram_disconnected(sk
, old_peer
);
1042 unix_peer(sk
) = other
;
1043 unix_state_double_unlock(sk
, other
);
1048 unix_state_double_unlock(sk
, other
);
1054 static long unix_wait_for_peer(struct sock
*other
, long timeo
)
1056 struct unix_sock
*u
= unix_sk(other
);
1060 prepare_to_wait_exclusive(&u
->peer_wait
, &wait
, TASK_INTERRUPTIBLE
);
1062 sched
= !sock_flag(other
, SOCK_DEAD
) &&
1063 !(other
->sk_shutdown
& RCV_SHUTDOWN
) &&
1064 unix_recvq_full(other
);
1066 unix_state_unlock(other
);
1069 timeo
= schedule_timeout(timeo
);
1071 finish_wait(&u
->peer_wait
, &wait
);
1075 static int unix_stream_connect(struct socket
*sock
, struct sockaddr
*uaddr
,
1076 int addr_len
, int flags
)
1078 struct sockaddr_un
*sunaddr
= (struct sockaddr_un
*)uaddr
;
1079 struct sock
*sk
= sock
->sk
;
1080 struct net
*net
= sock_net(sk
);
1081 struct unix_sock
*u
= unix_sk(sk
), *newu
, *otheru
;
1082 struct sock
*newsk
= NULL
;
1083 struct sock
*other
= NULL
;
1084 struct sk_buff
*skb
= NULL
;
1090 err
= unix_mkname(sunaddr
, addr_len
, &hash
);
1095 if (test_bit(SOCK_PASSCRED
, &sock
->flags
) && !u
->addr
&&
1096 (err
= unix_autobind(sock
)) != 0)
1099 timeo
= sock_sndtimeo(sk
, flags
& O_NONBLOCK
);
1101 /* First of all allocate resources.
1102 If we will make it after state is locked,
1103 we will have to recheck all again in any case.
1108 /* create new sock for complete connection */
1109 newsk
= unix_create1(sock_net(sk
), NULL
, 0);
1113 /* Allocate skb for sending to listening sock */
1114 skb
= sock_wmalloc(newsk
, 1, 0, GFP_KERNEL
);
1119 /* Find listening sock. */
1120 other
= unix_find_other(net
, sunaddr
, addr_len
, sk
->sk_type
, hash
, &err
);
1124 /* Latch state of peer */
1125 unix_state_lock(other
);
1127 /* Apparently VFS overslept socket death. Retry. */
1128 if (sock_flag(other
, SOCK_DEAD
)) {
1129 unix_state_unlock(other
);
1134 err
= -ECONNREFUSED
;
1135 if (other
->sk_state
!= TCP_LISTEN
)
1137 if (other
->sk_shutdown
& RCV_SHUTDOWN
)
1140 if (unix_recvq_full(other
)) {
1145 timeo
= unix_wait_for_peer(other
, timeo
);
1147 err
= sock_intr_errno(timeo
);
1148 if (signal_pending(current
))
1156 It is tricky place. We need to grab our state lock and cannot
1157 drop lock on peer. It is dangerous because deadlock is
1158 possible. Connect to self case and simultaneous
1159 attempt to connect are eliminated by checking socket
1160 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1161 check this before attempt to grab lock.
1163 Well, and we have to recheck the state after socket locked.
1169 /* This is ok... continue with connect */
1171 case TCP_ESTABLISHED
:
1172 /* Socket is already connected */
1180 unix_state_lock_nested(sk
);
1182 if (sk
->sk_state
!= st
) {
1183 unix_state_unlock(sk
);
1184 unix_state_unlock(other
);
1189 err
= security_unix_stream_connect(sk
, other
, newsk
);
1191 unix_state_unlock(sk
);
1195 /* The way is open! Fastly set all the necessary fields... */
1198 unix_peer(newsk
) = sk
;
1199 newsk
->sk_state
= TCP_ESTABLISHED
;
1200 newsk
->sk_type
= sk
->sk_type
;
1201 init_peercred(newsk
);
1202 newu
= unix_sk(newsk
);
1203 RCU_INIT_POINTER(newsk
->sk_wq
, &newu
->peer_wq
);
1204 otheru
= unix_sk(other
);
1206 /* copy address information from listening to new sock*/
1208 atomic_inc(&otheru
->addr
->refcnt
);
1209 newu
->addr
= otheru
->addr
;
1211 if (otheru
->path
.dentry
) {
1212 path_get(&otheru
->path
);
1213 newu
->path
= otheru
->path
;
1216 /* Set credentials */
1217 copy_peercred(sk
, other
);
1219 sock
->state
= SS_CONNECTED
;
1220 sk
->sk_state
= TCP_ESTABLISHED
;
1223 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1224 unix_peer(sk
) = newsk
;
1226 unix_state_unlock(sk
);
1228 /* take ten and and send info to listening sock */
1229 spin_lock(&other
->sk_receive_queue
.lock
);
1230 __skb_queue_tail(&other
->sk_receive_queue
, skb
);
1231 spin_unlock(&other
->sk_receive_queue
.lock
);
1232 unix_state_unlock(other
);
1233 other
->sk_data_ready(other
);
1239 unix_state_unlock(other
);
1244 unix_release_sock(newsk
, 0);
1250 static int unix_socketpair(struct socket
*socka
, struct socket
*sockb
)
1252 struct sock
*ska
= socka
->sk
, *skb
= sockb
->sk
;
1254 /* Join our sockets back to back */
1257 unix_peer(ska
) = skb
;
1258 unix_peer(skb
) = ska
;
1262 if (ska
->sk_type
!= SOCK_DGRAM
) {
1263 ska
->sk_state
= TCP_ESTABLISHED
;
1264 skb
->sk_state
= TCP_ESTABLISHED
;
1265 socka
->state
= SS_CONNECTED
;
1266 sockb
->state
= SS_CONNECTED
;
1271 static void unix_sock_inherit_flags(const struct socket
*old
,
1274 if (test_bit(SOCK_PASSCRED
, &old
->flags
))
1275 set_bit(SOCK_PASSCRED
, &new->flags
);
1276 if (test_bit(SOCK_PASSSEC
, &old
->flags
))
1277 set_bit(SOCK_PASSSEC
, &new->flags
);
1280 static int unix_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1282 struct sock
*sk
= sock
->sk
;
1284 struct sk_buff
*skb
;
1288 if (sock
->type
!= SOCK_STREAM
&& sock
->type
!= SOCK_SEQPACKET
)
1292 if (sk
->sk_state
!= TCP_LISTEN
)
1295 /* If socket state is TCP_LISTEN it cannot change (for now...),
1296 * so that no locks are necessary.
1299 skb
= skb_recv_datagram(sk
, 0, flags
&O_NONBLOCK
, &err
);
1301 /* This means receive shutdown. */
1308 skb_free_datagram(sk
, skb
);
1309 wake_up_interruptible(&unix_sk(sk
)->peer_wait
);
1311 /* attach accepted sock to socket */
1312 unix_state_lock(tsk
);
1313 newsock
->state
= SS_CONNECTED
;
1314 unix_sock_inherit_flags(sock
, newsock
);
1315 sock_graft(tsk
, newsock
);
1316 unix_state_unlock(tsk
);
1324 static int unix_getname(struct socket
*sock
, struct sockaddr
*uaddr
, int *uaddr_len
, int peer
)
1326 struct sock
*sk
= sock
->sk
;
1327 struct unix_sock
*u
;
1328 DECLARE_SOCKADDR(struct sockaddr_un
*, sunaddr
, uaddr
);
1332 sk
= unix_peer_get(sk
);
1343 unix_state_lock(sk
);
1345 sunaddr
->sun_family
= AF_UNIX
;
1346 sunaddr
->sun_path
[0] = 0;
1347 *uaddr_len
= sizeof(short);
1349 struct unix_address
*addr
= u
->addr
;
1351 *uaddr_len
= addr
->len
;
1352 memcpy(sunaddr
, addr
->name
, *uaddr_len
);
1354 unix_state_unlock(sk
);
1360 static void unix_detach_fds(struct scm_cookie
*scm
, struct sk_buff
*skb
)
1364 scm
->fp
= UNIXCB(skb
).fp
;
1365 UNIXCB(skb
).fp
= NULL
;
1367 for (i
= scm
->fp
->count
-1; i
>= 0; i
--)
1368 unix_notinflight(scm
->fp
->fp
[i
]);
1371 static void unix_destruct_scm(struct sk_buff
*skb
)
1373 struct scm_cookie scm
;
1374 memset(&scm
, 0, sizeof(scm
));
1375 scm
.pid
= UNIXCB(skb
).pid
;
1377 unix_detach_fds(&scm
, skb
);
1379 /* Alas, it calls VFS */
1380 /* So fscking what? fput() had been SMP-safe since the last Summer */
1385 #define MAX_RECURSION_LEVEL 4
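
/*
 * Note (explanatory, not from the original comments): recursion_level tracks
 * how deeply AF_UNIX sockets are nested via SCM_RIGHTS - a socket that has
 * received another unix socket as an fd sits one level above it.
 * unix_attach_fds() below refuses, with -ETOOMANYREFS, to build chains
 * deeper than MAX_RECURSION_LEVEL.
 */
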
1387 static int unix_attach_fds(struct scm_cookie
*scm
, struct sk_buff
*skb
)
1390 unsigned char max_level
= 0;
1391 int unix_sock_count
= 0;
1393 for (i
= scm
->fp
->count
- 1; i
>= 0; i
--) {
1394 struct sock
*sk
= unix_get_socket(scm
->fp
->fp
[i
]);
1398 max_level
= max(max_level
,
1399 unix_sk(sk
)->recursion_level
);
1402 if (unlikely(max_level
> MAX_RECURSION_LEVEL
))
1403 return -ETOOMANYREFS
;
1406 * Need to duplicate file references for the sake of garbage
1407 * collection. Otherwise a socket in the fps might become a
1408 * candidate for GC while the skb is not yet queued.
1410 UNIXCB(skb
).fp
= scm_fp_dup(scm
->fp
);
1411 if (!UNIXCB(skb
).fp
)
1414 if (unix_sock_count
) {
1415 for (i
= scm
->fp
->count
- 1; i
>= 0; i
--)
1416 unix_inflight(scm
->fp
->fp
[i
]);
1421 static int unix_scm_to_skb(struct scm_cookie
*scm
, struct sk_buff
*skb
, bool send_fds
)
1425 UNIXCB(skb
).pid
= get_pid(scm
->pid
);
1426 UNIXCB(skb
).uid
= scm
->creds
.uid
;
1427 UNIXCB(skb
).gid
= scm
->creds
.gid
;
1428 UNIXCB(skb
).fp
= NULL
;
1429 unix_get_secdata(scm
, skb
);
1430 if (scm
->fp
&& send_fds
)
1431 err
= unix_attach_fds(scm
, skb
);
1433 skb
->destructor
= unix_destruct_scm
;
1438 * Some apps rely on write() giving SCM_CREDENTIALS
1439 * We include credentials if source or destination socket
1440 * asserted SOCK_PASSCRED.
1442 static void maybe_add_creds(struct sk_buff
*skb
, const struct socket
*sock
,
1443 const struct sock
*other
)
1445 if (UNIXCB(skb
).pid
)
1447 if (test_bit(SOCK_PASSCRED
, &sock
->flags
) ||
1448 !other
->sk_socket
||
1449 test_bit(SOCK_PASSCRED
, &other
->sk_socket
->flags
)) {
1450 UNIXCB(skb
).pid
= get_pid(task_tgid(current
));
1451 current_uid_gid(&UNIXCB(skb
).uid
, &UNIXCB(skb
).gid
);
1456 * Send AF_UNIX data.
1459 static int unix_dgram_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1462 struct sock
*sk
= sock
->sk
;
1463 struct net
*net
= sock_net(sk
);
1464 struct unix_sock
*u
= unix_sk(sk
);
1465 DECLARE_SOCKADDR(struct sockaddr_un
*, sunaddr
, msg
->msg_name
);
1466 struct sock
*other
= NULL
;
1467 int namelen
= 0; /* fake GCC */
1470 struct sk_buff
*skb
;
1472 struct scm_cookie scm
;
1477 err
= scm_send(sock
, msg
, &scm
, false);
1482 if (msg
->msg_flags
&MSG_OOB
)
1485 if (msg
->msg_namelen
) {
1486 err
= unix_mkname(sunaddr
, msg
->msg_namelen
, &hash
);
1493 other
= unix_peer_get(sk
);
1498 if (test_bit(SOCK_PASSCRED
, &sock
->flags
) && !u
->addr
1499 && (err
= unix_autobind(sock
)) != 0)
1503 if (len
> sk
->sk_sndbuf
- 32)
1506 if (len
> SKB_MAX_ALLOC
) {
1507 data_len
= min_t(size_t,
1508 len
- SKB_MAX_ALLOC
,
1509 MAX_SKB_FRAGS
* PAGE_SIZE
);
1510 data_len
= PAGE_ALIGN(data_len
);
1512 BUILD_BUG_ON(SKB_MAX_ALLOC
< PAGE_SIZE
);
1515 skb
= sock_alloc_send_pskb(sk
, len
- data_len
, data_len
,
1516 msg
->msg_flags
& MSG_DONTWAIT
, &err
,
1517 PAGE_ALLOC_COSTLY_ORDER
);
1521 err
= unix_scm_to_skb(&scm
, skb
, true);
1524 max_level
= err
+ 1;
1526 skb_put(skb
, len
- data_len
);
1527 skb
->data_len
= data_len
;
1529 err
= skb_copy_datagram_from_iter(skb
, 0, &msg
->msg_iter
, len
);
1533 timeo
= sock_sndtimeo(sk
, msg
->msg_flags
& MSG_DONTWAIT
);
1538 if (sunaddr
== NULL
)
1541 other
= unix_find_other(net
, sunaddr
, namelen
, sk
->sk_type
,
1547 if (sk_filter(other
, skb
) < 0) {
1548 /* Toss the packet but do not return any error to the sender */
1553 unix_state_lock(other
);
1555 if (!unix_may_send(sk
, other
))
1558 if (sock_flag(other
, SOCK_DEAD
)) {
1560 * Check with 1003.1g - what should
1563 unix_state_unlock(other
);
1567 unix_state_lock(sk
);
1568 if (unix_peer(sk
) == other
) {
1569 unix_peer(sk
) = NULL
;
1570 unix_state_unlock(sk
);
1572 unix_dgram_disconnected(sk
, other
);
1574 err
= -ECONNREFUSED
;
1576 unix_state_unlock(sk
);
1586 if (other
->sk_shutdown
& RCV_SHUTDOWN
)
1589 if (sk
->sk_type
!= SOCK_SEQPACKET
) {
1590 err
= security_unix_may_send(sk
->sk_socket
, other
->sk_socket
);
1595 if (unix_peer(other
) != sk
&& unix_recvq_full(other
)) {
1601 timeo
= unix_wait_for_peer(other
, timeo
);
1603 err
= sock_intr_errno(timeo
);
1604 if (signal_pending(current
))
1610 if (sock_flag(other
, SOCK_RCVTSTAMP
))
1611 __net_timestamp(skb
);
1612 maybe_add_creds(skb
, sock
, other
);
1613 skb_queue_tail(&other
->sk_receive_queue
, skb
);
1614 if (max_level
> unix_sk(other
)->recursion_level
)
1615 unix_sk(other
)->recursion_level
= max_level
;
1616 unix_state_unlock(other
);
1617 other
->sk_data_ready(other
);
1623 unix_state_unlock(other
);
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
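
/*
 * Worked example (illustrative): with 4 KiB pages, get_order(32768) == 3,
 * so UNIX_SKB_FRAGS_SZ == 4096 << 3 == 32 KiB of page fragments per skb;
 * with 64 KiB pages get_order(32768) == 0 and the "full page" minimum wins,
 * giving 64 KiB.
 */
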
1638 static int unix_stream_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1641 struct sock
*sk
= sock
->sk
;
1642 struct sock
*other
= NULL
;
1644 struct sk_buff
*skb
;
1646 struct scm_cookie scm
;
1647 bool fds_sent
= false;
1652 err
= scm_send(sock
, msg
, &scm
, false);
1657 if (msg
->msg_flags
&MSG_OOB
)
1660 if (msg
->msg_namelen
) {
1661 err
= sk
->sk_state
== TCP_ESTABLISHED
? -EISCONN
: -EOPNOTSUPP
;
1665 other
= unix_peer(sk
);
1670 if (sk
->sk_shutdown
& SEND_SHUTDOWN
)
1673 while (sent
< len
) {
1676 /* Keep two messages in the pipe so it schedules better */
1677 size
= min_t(int, size
, (sk
->sk_sndbuf
>> 1) - 64);
1679 /* allow fallback to order-0 allocations */
1680 size
= min_t(int, size
, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ
);
1682 data_len
= max_t(int, 0, size
- SKB_MAX_HEAD(0));
1684 data_len
= min_t(size_t, size
, PAGE_ALIGN(data_len
));
1686 skb
= sock_alloc_send_pskb(sk
, size
- data_len
, data_len
,
1687 msg
->msg_flags
& MSG_DONTWAIT
, &err
,
1688 get_order(UNIX_SKB_FRAGS_SZ
));
1692 /* Only send the fds in the first buffer */
1693 err
= unix_scm_to_skb(&scm
, skb
, !fds_sent
);
1698 max_level
= err
+ 1;
1701 skb_put(skb
, size
- data_len
);
1702 skb
->data_len
= data_len
;
1704 err
= skb_copy_datagram_from_iter(skb
, 0, &msg
->msg_iter
, size
);
1710 unix_state_lock(other
);
1712 if (sock_flag(other
, SOCK_DEAD
) ||
1713 (other
->sk_shutdown
& RCV_SHUTDOWN
))
1716 maybe_add_creds(skb
, sock
, other
);
1717 skb_queue_tail(&other
->sk_receive_queue
, skb
);
1718 if (max_level
> unix_sk(other
)->recursion_level
)
1719 unix_sk(other
)->recursion_level
= max_level
;
1720 unix_state_unlock(other
);
1721 other
->sk_data_ready(other
);
1730 unix_state_unlock(other
);
1733 if (sent
== 0 && !(msg
->msg_flags
&MSG_NOSIGNAL
))
1734 send_sig(SIGPIPE
, current
, 0);
1738 return sent
? : err
;
1741 static ssize_t
unix_stream_sendpage(struct socket
*socket
, struct page
*page
,
1742 int offset
, size_t size
, int flags
)
1745 bool send_sigpipe
= true;
1746 struct sock
*other
, *sk
= socket
->sk
;
1747 struct sk_buff
*skb
, *newskb
= NULL
, *tail
= NULL
;
1749 if (flags
& MSG_OOB
)
1752 other
= unix_peer(sk
);
1753 if (!other
|| sk
->sk_state
!= TCP_ESTABLISHED
)
1758 unix_state_unlock(other
);
1759 mutex_unlock(&unix_sk(other
)->readlock
);
1760 newskb
= sock_alloc_send_pskb(sk
, 0, 0, flags
& MSG_DONTWAIT
,
1766 /* we must acquire readlock as we modify already present
1767 * skbs in the sk_receive_queue and mess with skb->len
1769 err
= mutex_lock_interruptible(&unix_sk(other
)->readlock
);
1771 err
= flags
& MSG_DONTWAIT
? -EAGAIN
: -ERESTARTSYS
;
1772 send_sigpipe
= false;
1776 if (sk
->sk_shutdown
& SEND_SHUTDOWN
) {
1781 unix_state_lock(other
);
1783 if (sock_flag(other
, SOCK_DEAD
) ||
1784 other
->sk_shutdown
& RCV_SHUTDOWN
) {
1786 goto err_state_unlock
;
1789 skb
= skb_peek_tail(&other
->sk_receive_queue
);
1790 if (tail
&& tail
== skb
) {
1797 } else if (newskb
) {
1798 /* this is fast path, we don't necessarily need to
1799 * call to kfree_skb even though with newskb == NULL
1800 * this - does no harm
1802 consume_skb(newskb
);
1806 if (skb_append_pagefrags(skb
, page
, offset
, size
)) {
1812 skb
->data_len
+= size
;
1813 skb
->truesize
+= size
;
1814 atomic_add(size
, &sk
->sk_wmem_alloc
);
1817 __skb_queue_tail(&other
->sk_receive_queue
, newskb
);
1819 unix_state_unlock(other
);
1820 mutex_unlock(&unix_sk(other
)->readlock
);
1822 other
->sk_data_ready(other
);
1827 unix_state_unlock(other
);
1829 mutex_unlock(&unix_sk(other
)->readlock
);
1832 if (send_sigpipe
&& !(flags
& MSG_NOSIGNAL
))
1833 send_sig(SIGPIPE
, current
, 0);
1837 static int unix_seqpacket_sendmsg(struct socket
*sock
, struct msghdr
*msg
,
1841 struct sock
*sk
= sock
->sk
;
1843 err
= sock_error(sk
);
1847 if (sk
->sk_state
!= TCP_ESTABLISHED
)
1850 if (msg
->msg_namelen
)
1851 msg
->msg_namelen
= 0;
1853 return unix_dgram_sendmsg(sock
, msg
, len
);
1856 static int unix_seqpacket_recvmsg(struct socket
*sock
, struct msghdr
*msg
,
1857 size_t size
, int flags
)
1859 struct sock
*sk
= sock
->sk
;
1861 if (sk
->sk_state
!= TCP_ESTABLISHED
)
1864 return unix_dgram_recvmsg(sock
, msg
, size
, flags
);
1867 static void unix_copy_addr(struct msghdr
*msg
, struct sock
*sk
)
1869 struct unix_sock
*u
= unix_sk(sk
);
1872 msg
->msg_namelen
= u
->addr
->len
;
1873 memcpy(msg
->msg_name
, u
->addr
->name
, u
->addr
->len
);
1877 static int unix_dgram_recvmsg(struct socket
*sock
, struct msghdr
*msg
,
1878 size_t size
, int flags
)
1880 struct scm_cookie scm
;
1881 struct sock
*sk
= sock
->sk
;
1882 struct unix_sock
*u
= unix_sk(sk
);
1883 int noblock
= flags
& MSG_DONTWAIT
;
1884 struct sk_buff
*skb
;
1892 err
= mutex_lock_interruptible(&u
->readlock
);
1893 if (unlikely(err
)) {
1894 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
1895 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1897 err
= noblock
? -EAGAIN
: -ERESTARTSYS
;
1901 skip
= sk_peek_offset(sk
, flags
);
1903 skb
= __skb_recv_datagram(sk
, flags
, &peeked
, &skip
, &err
);
1905 unix_state_lock(sk
);
1906 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1907 if (sk
->sk_type
== SOCK_SEQPACKET
&& err
== -EAGAIN
&&
1908 (sk
->sk_shutdown
& RCV_SHUTDOWN
))
1910 unix_state_unlock(sk
);
1914 wake_up_interruptible_sync_poll(&u
->peer_wait
,
1915 POLLOUT
| POLLWRNORM
| POLLWRBAND
);
1918 unix_copy_addr(msg
, skb
->sk
);
1920 if (size
> skb
->len
- skip
)
1921 size
= skb
->len
- skip
;
1922 else if (size
< skb
->len
- skip
)
1923 msg
->msg_flags
|= MSG_TRUNC
;
1925 err
= skb_copy_datagram_msg(skb
, skip
, msg
, size
);
1929 if (sock_flag(sk
, SOCK_RCVTSTAMP
))
1930 __sock_recv_timestamp(msg
, sk
, skb
);
1932 memset(&scm
, 0, sizeof(scm
));
1934 scm_set_cred(&scm
, UNIXCB(skb
).pid
, UNIXCB(skb
).uid
, UNIXCB(skb
).gid
);
1935 unix_set_secdata(&scm
, skb
);
1937 if (!(flags
& MSG_PEEK
)) {
1939 unix_detach_fds(&scm
, skb
);
1941 sk_peek_offset_bwd(sk
, skb
->len
);
1943 /* It is questionable: on PEEK we could:
1944 - do not return fds - good, but too simple 8)
1945 - return fds, and do not return them on read (old strategy,
1947 - clone fds (I chose it for now, it is the most universal
1950 POSIX 1003.1g does not actually define this clearly
1951 at all. POSIX 1003.1g doesn't define a lot of things
1956 sk_peek_offset_fwd(sk
, size
);
1959 scm
.fp
= scm_fp_dup(UNIXCB(skb
).fp
);
1961 err
= (flags
& MSG_TRUNC
) ? skb
->len
- skip
: size
;
1963 scm_recv(sock
, msg
, &scm
, flags
);
1966 skb_free_datagram(sk
, skb
);
1968 mutex_unlock(&u
->readlock
);
1974 * Sleep until more data has arrived. But check for races..
1976 static long unix_stream_data_wait(struct sock
*sk
, long timeo
,
1977 struct sk_buff
*last
, unsigned int last_len
)
1979 struct sk_buff
*tail
;
1982 unix_state_lock(sk
);
1985 prepare_to_wait(sk_sleep(sk
), &wait
, TASK_INTERRUPTIBLE
);
1987 tail
= skb_peek_tail(&sk
->sk_receive_queue
);
1989 (tail
&& tail
->len
!= last_len
) ||
1991 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
1992 signal_pending(current
) ||
1996 set_bit(SOCK_ASYNC_WAITDATA
, &sk
->sk_socket
->flags
);
1997 unix_state_unlock(sk
);
1998 timeo
= freezable_schedule_timeout(timeo
);
1999 unix_state_lock(sk
);
2001 if (sock_flag(sk
, SOCK_DEAD
))
2004 clear_bit(SOCK_ASYNC_WAITDATA
, &sk
->sk_socket
->flags
);
2007 finish_wait(sk_sleep(sk
), &wait
);
2008 unix_state_unlock(sk
);
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
        return skb->len - UNIXCB(skb).consumed;
}
2017 struct unix_stream_read_state
{
2018 int (*recv_actor
)(struct sk_buff
*, int, int,
2019 struct unix_stream_read_state
*);
2020 struct socket
*socket
;
2022 struct pipe_inode_info
*pipe
;
2025 unsigned int splice_flags
;
2028 static int unix_stream_read_generic(struct unix_stream_read_state
*state
)
2030 struct scm_cookie scm
;
2031 struct socket
*sock
= state
->socket
;
2032 struct sock
*sk
= sock
->sk
;
2033 struct unix_sock
*u
= unix_sk(sk
);
2035 int flags
= state
->flags
;
2036 int noblock
= flags
& MSG_DONTWAIT
;
2037 bool check_creds
= false;
2042 size_t size
= state
->size
;
2043 unsigned int last_len
;
2046 if (sk
->sk_state
!= TCP_ESTABLISHED
)
2050 if (flags
& MSG_OOB
)
2053 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, size
);
2054 timeo
= sock_rcvtimeo(sk
, noblock
);
2056 memset(&scm
, 0, sizeof(scm
));
2058 /* Lock the socket to prevent queue disordering
2059 * while sleeps in memcpy_tomsg
2061 err
= mutex_lock_interruptible(&u
->readlock
);
2062 if (unlikely(err
)) {
2063 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
2064 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2066 err
= noblock
? -EAGAIN
: -ERESTARTSYS
;
2070 if (flags
& MSG_PEEK
)
2071 skip
= sk_peek_offset(sk
, flags
);
2078 struct sk_buff
*skb
, *last
;
2080 unix_state_lock(sk
);
2081 if (sock_flag(sk
, SOCK_DEAD
)) {
2085 last
= skb
= skb_peek(&sk
->sk_receive_queue
);
2086 last_len
= last
? last
->len
: 0;
2089 unix_sk(sk
)->recursion_level
= 0;
2090 if (copied
>= target
)
2094 * POSIX 1003.1g mandates this order.
2097 err
= sock_error(sk
);
2100 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
2103 unix_state_unlock(sk
);
2107 mutex_unlock(&u
->readlock
);
2109 timeo
= unix_stream_data_wait(sk
, timeo
, last
,
2112 if (signal_pending(current
) ||
2113 mutex_lock_interruptible(&u
->readlock
)) {
2114 err
= sock_intr_errno(timeo
);
2120 unix_state_unlock(sk
);
2124 while (skip
>= unix_skb_len(skb
)) {
2125 skip
-= unix_skb_len(skb
);
2127 last_len
= skb
->len
;
2128 skb
= skb_peek_next(skb
, &sk
->sk_receive_queue
);
2133 unix_state_unlock(sk
);
2136 /* Never glue messages from different writers */
2137 if ((UNIXCB(skb
).pid
!= scm
.pid
) ||
2138 !uid_eq(UNIXCB(skb
).uid
, scm
.creds
.uid
) ||
2139 !gid_eq(UNIXCB(skb
).gid
, scm
.creds
.gid
) ||
2140 !unix_secdata_eq(&scm
, skb
))
2142 } else if (test_bit(SOCK_PASSCRED
, &sock
->flags
)) {
2143 /* Copy credentials */
2144 scm_set_cred(&scm
, UNIXCB(skb
).pid
, UNIXCB(skb
).uid
, UNIXCB(skb
).gid
);
2145 unix_set_secdata(&scm
, skb
);
2149 /* Copy address just once */
2150 if (state
->msg
&& state
->msg
->msg_name
) {
2151 DECLARE_SOCKADDR(struct sockaddr_un
*, sunaddr
,
2152 state
->msg
->msg_name
);
2153 unix_copy_addr(state
->msg
, skb
->sk
);
2157 chunk
= min_t(unsigned int, unix_skb_len(skb
) - skip
, size
);
2159 chunk
= state
->recv_actor(skb
, skip
, chunk
, state
);
2160 drop_skb
= !unix_skb_len(skb
);
2161 /* skb is only safe to use if !drop_skb */
2172 /* the skb was touched by a concurrent reader;
2173 * we should not expect anything from this skb
2174 * anymore and assume it invalid - we can be
2175 * sure it was dropped from the socket queue
2177 * let's report a short read
2183 /* Mark read part of skb as used */
2184 if (!(flags
& MSG_PEEK
)) {
2185 UNIXCB(skb
).consumed
+= chunk
;
2187 sk_peek_offset_bwd(sk
, chunk
);
2190 unix_detach_fds(&scm
, skb
);
2192 if (unix_skb_len(skb
))
2195 skb_unlink(skb
, &sk
->sk_receive_queue
);
2201 /* It is questionable, see note in unix_dgram_recvmsg.
2204 scm
.fp
= scm_fp_dup(UNIXCB(skb
).fp
);
2206 sk_peek_offset_fwd(sk
, chunk
);
2213 last_len
= skb
->len
;
2214 unix_state_lock(sk
);
2215 skb
= skb_peek_next(skb
, &sk
->sk_receive_queue
);
2218 unix_state_unlock(sk
);
2223 mutex_unlock(&u
->readlock
);
2225 scm_recv(sock
, state
->msg
, &scm
, flags
);
2229 return copied
? : err
;
2232 static int unix_stream_read_actor(struct sk_buff
*skb
,
2233 int skip
, int chunk
,
2234 struct unix_stream_read_state
*state
)
2238 ret
= skb_copy_datagram_msg(skb
, UNIXCB(skb
).consumed
+ skip
,
2240 return ret
?: chunk
;
2243 static int unix_stream_recvmsg(struct socket
*sock
, struct msghdr
*msg
,
2244 size_t size
, int flags
)
2246 struct unix_stream_read_state state
= {
2247 .recv_actor
= unix_stream_read_actor
,
2254 return unix_stream_read_generic(&state
);
2257 static ssize_t
skb_unix_socket_splice(struct sock
*sk
,
2258 struct pipe_inode_info
*pipe
,
2259 struct splice_pipe_desc
*spd
)
2262 struct unix_sock
*u
= unix_sk(sk
);
2264 mutex_unlock(&u
->readlock
);
2265 ret
= splice_to_pipe(pipe
, spd
);
2266 mutex_lock(&u
->readlock
);
2271 static int unix_stream_splice_actor(struct sk_buff
*skb
,
2272 int skip
, int chunk
,
2273 struct unix_stream_read_state
*state
)
2275 return skb_splice_bits(skb
, state
->socket
->sk
,
2276 UNIXCB(skb
).consumed
+ skip
,
2277 state
->pipe
, chunk
, state
->splice_flags
,
2278 skb_unix_socket_splice
);
2281 static ssize_t
unix_stream_splice_read(struct socket
*sock
, loff_t
*ppos
,
2282 struct pipe_inode_info
*pipe
,
2283 size_t size
, unsigned int flags
)
2285 struct unix_stream_read_state state
= {
2286 .recv_actor
= unix_stream_splice_actor
,
2290 .splice_flags
= flags
,
2293 if (unlikely(*ppos
))
2296 if (sock
->file
->f_flags
& O_NONBLOCK
||
2297 flags
& SPLICE_F_NONBLOCK
)
2298 state
.flags
= MSG_DONTWAIT
;
2300 return unix_stream_read_generic(&state
);
2303 static int unix_shutdown(struct socket
*sock
, int mode
)
2305 struct sock
*sk
= sock
->sk
;
2308 if (mode
< SHUT_RD
|| mode
> SHUT_RDWR
)
2311 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2312 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2313 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2317 unix_state_lock(sk
);
2318 sk
->sk_shutdown
|= mode
;
2319 other
= unix_peer(sk
);
2322 unix_state_unlock(sk
);
2323 sk
->sk_state_change(sk
);
2326 (sk
->sk_type
== SOCK_STREAM
|| sk
->sk_type
== SOCK_SEQPACKET
)) {
2330 if (mode
&RCV_SHUTDOWN
)
2331 peer_mode
|= SEND_SHUTDOWN
;
2332 if (mode
&SEND_SHUTDOWN
)
2333 peer_mode
|= RCV_SHUTDOWN
;
2334 unix_state_lock(other
);
2335 other
->sk_shutdown
|= peer_mode
;
2336 unix_state_unlock(other
);
2337 other
->sk_state_change(other
);
2338 if (peer_mode
== SHUTDOWN_MASK
)
2339 sk_wake_async(other
, SOCK_WAKE_WAITD
, POLL_HUP
);
2340 else if (peer_mode
& RCV_SHUTDOWN
)
2341 sk_wake_async(other
, SOCK_WAKE_WAITD
, POLL_IN
);
long unix_inq_len(struct sock *sk)
{
        struct sk_buff *skb;
        long amount = 0;

        if (sk->sk_state == TCP_LISTEN)
                return -EINVAL;

        spin_lock(&sk->sk_receive_queue.lock);
        if (sk->sk_type == SOCK_STREAM ||
            sk->sk_type == SOCK_SEQPACKET) {
                skb_queue_walk(&sk->sk_receive_queue, skb)
                        amount += unix_skb_len(skb);
        } else {
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb)
                        amount = skb->len;
        }
        spin_unlock(&sk->sk_receive_queue.lock);

        return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
        return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
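
/*
 * Usage note (illustrative): these helpers back the SIOCOUTQ/SIOCINQ
 * (TIOCOUTQ/FIONREAD) ioctls handled in unix_ioctl() below - for a stream
 * or seqpacket socket SIOCINQ reports the number of unread queued bytes,
 * while for a datagram socket it reports the size of the next datagram.
 */
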
2379 static int unix_ioctl(struct socket
*sock
, unsigned int cmd
, unsigned long arg
)
2381 struct sock
*sk
= sock
->sk
;
2387 amount
= unix_outq_len(sk
);
2388 err
= put_user(amount
, (int __user
*)arg
);
2391 amount
= unix_inq_len(sk
);
2395 err
= put_user(amount
, (int __user
*)arg
);
2404 static unsigned int unix_poll(struct file
*file
, struct socket
*sock
, poll_table
*wait
)
2406 struct sock
*sk
= sock
->sk
;
2409 sock_poll_wait(file
, sk_sleep(sk
), wait
);
2412 /* exceptional events? */
2415 if (sk
->sk_shutdown
== SHUTDOWN_MASK
)
2417 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
2418 mask
|= POLLRDHUP
| POLLIN
| POLLRDNORM
;
2421 if (!skb_queue_empty(&sk
->sk_receive_queue
))
2422 mask
|= POLLIN
| POLLRDNORM
;
2424 /* Connection-based need to check for termination and startup */
2425 if ((sk
->sk_type
== SOCK_STREAM
|| sk
->sk_type
== SOCK_SEQPACKET
) &&
2426 sk
->sk_state
== TCP_CLOSE
)
2430 * we set writable also when the other side has shut down the
2431 * connection. This prevents stuck sockets.
2433 if (unix_writable(sk
))
2434 mask
|= POLLOUT
| POLLWRNORM
| POLLWRBAND
;
2439 static unsigned int unix_dgram_poll(struct file
*file
, struct socket
*sock
,
2442 struct sock
*sk
= sock
->sk
, *other
;
2443 unsigned int mask
, writable
;
2445 sock_poll_wait(file
, sk_sleep(sk
), wait
);
2448 /* exceptional events? */
2449 if (sk
->sk_err
|| !skb_queue_empty(&sk
->sk_error_queue
))
2451 (sock_flag(sk
, SOCK_SELECT_ERR_QUEUE
) ? POLLPRI
: 0);
2453 if (sk
->sk_shutdown
& RCV_SHUTDOWN
)
2454 mask
|= POLLRDHUP
| POLLIN
| POLLRDNORM
;
2455 if (sk
->sk_shutdown
== SHUTDOWN_MASK
)
2459 if (!skb_queue_empty(&sk
->sk_receive_queue
))
2460 mask
|= POLLIN
| POLLRDNORM
;
2462 /* Connection-based need to check for termination and startup */
2463 if (sk
->sk_type
== SOCK_SEQPACKET
) {
2464 if (sk
->sk_state
== TCP_CLOSE
)
2466 /* connection hasn't started yet? */
2467 if (sk
->sk_state
== TCP_SYN_SENT
)
2471 /* No write status requested, avoid expensive OUT tests. */
2472 if (!(poll_requested_events(wait
) & (POLLWRBAND
|POLLWRNORM
|POLLOUT
)))
2475 writable
= unix_writable(sk
);
2476 other
= unix_peer_get(sk
);
2478 if (unix_peer(other
) != sk
) {
2479 sock_poll_wait(file
, &unix_sk(other
)->peer_wait
, wait
);
2480 if (unix_recvq_full(other
))
2487 mask
|= POLLOUT
| POLLWRNORM
| POLLWRBAND
;
2489 set_bit(SOCK_ASYNC_NOSPACE
, &sk
->sk_socket
->flags
);
2494 #ifdef CONFIG_PROC_FS
2496 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2498 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2499 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2500 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
2502 static struct sock
*unix_from_bucket(struct seq_file
*seq
, loff_t
*pos
)
2504 unsigned long offset
= get_offset(*pos
);
2505 unsigned long bucket
= get_bucket(*pos
);
2507 unsigned long count
= 0;
2509 for (sk
= sk_head(&unix_socket_table
[bucket
]); sk
; sk
= sk_next(sk
)) {
2510 if (sock_net(sk
) != seq_file_net(seq
))
2512 if (++count
== offset
)
2519 static struct sock
*unix_next_socket(struct seq_file
*seq
,
2523 unsigned long bucket
;
2525 while (sk
> (struct sock
*)SEQ_START_TOKEN
) {
2529 if (sock_net(sk
) == seq_file_net(seq
))
2534 sk
= unix_from_bucket(seq
, pos
);
2539 bucket
= get_bucket(*pos
) + 1;
2540 *pos
= set_bucket_offset(bucket
, 1);
2541 } while (bucket
< ARRAY_SIZE(unix_socket_table
));
2546 static void *unix_seq_start(struct seq_file
*seq
, loff_t
*pos
)
2547 __acquires(unix_table_lock
)
2549 spin_lock(&unix_table_lock
);
2552 return SEQ_START_TOKEN
;
2554 if (get_bucket(*pos
) >= ARRAY_SIZE(unix_socket_table
))
2557 return unix_next_socket(seq
, NULL
, pos
);
2560 static void *unix_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2563 return unix_next_socket(seq
, v
, pos
);
2566 static void unix_seq_stop(struct seq_file
*seq
, void *v
)
2567 __releases(unix_table_lock
)
2569 spin_unlock(&unix_table_lock
);
2572 static int unix_seq_show(struct seq_file
*seq
, void *v
)
2575 if (v
== SEQ_START_TOKEN
)
2576 seq_puts(seq
, "Num RefCount Protocol Flags Type St "
2580 struct unix_sock
*u
= unix_sk(s
);
2583 seq_printf(seq
, "%pK: %08X %08X %08X %04X %02X %5lu",
2585 atomic_read(&s
->sk_refcnt
),
2587 s
->sk_state
== TCP_LISTEN
? __SO_ACCEPTCON
: 0,
2590 (s
->sk_state
== TCP_ESTABLISHED
? SS_CONNECTED
: SS_UNCONNECTED
) :
2591 (s
->sk_state
== TCP_ESTABLISHED
? SS_CONNECTING
: SS_DISCONNECTING
),
2599 len
= u
->addr
->len
- sizeof(short);
2600 if (!UNIX_ABSTRACT(s
))
2606 for ( ; i
< len
; i
++)
2607 seq_putc(seq
, u
->addr
->name
->sun_path
[i
]);
2609 unix_state_unlock(s
);
2610 seq_putc(seq
, '\n');
2616 static const struct seq_operations unix_seq_ops
= {
2617 .start
= unix_seq_start
,
2618 .next
= unix_seq_next
,
2619 .stop
= unix_seq_stop
,
2620 .show
= unix_seq_show
,
2623 static int unix_seq_open(struct inode
*inode
, struct file
*file
)
2625 return seq_open_net(inode
, file
, &unix_seq_ops
,
2626 sizeof(struct seq_net_private
));
2629 static const struct file_operations unix_seq_fops
= {
2630 .owner
= THIS_MODULE
,
2631 .open
= unix_seq_open
,
2633 .llseek
= seq_lseek
,
2634 .release
= seq_release_net
,
2639 static const struct net_proto_family unix_family_ops
= {
2641 .create
= unix_create
,
2642 .owner
= THIS_MODULE
,
2646 static int __net_init
unix_net_init(struct net
*net
)
2648 int error
= -ENOMEM
;
2650 net
->unx
.sysctl_max_dgram_qlen
= 10;
2651 if (unix_sysctl_register(net
))
2654 #ifdef CONFIG_PROC_FS
2655 if (!proc_create("unix", 0, net
->proc_net
, &unix_seq_fops
)) {
2656 unix_sysctl_unregister(net
);
2665 static void __net_exit
unix_net_exit(struct net
*net
)
2667 unix_sysctl_unregister(net
);
2668 remove_proc_entry("unix", net
->proc_net
);
2671 static struct pernet_operations unix_net_ops
= {
2672 .init
= unix_net_init
,
2673 .exit
= unix_net_exit
,
static int __init af_unix_init(void)
{
        int rc = -1;

        BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

        rc = proto_register(&unix_proto, 1);
        if (rc != 0) {
                pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
                goto out;
        }

        sock_register(&unix_family_ops);
        register_pernet_subsys(&unix_net_ops);
out:
        return rc;
}

static void __exit af_unix_exit(void)
{
        sock_unregister(PF_UNIX);
        proto_unregister(&unix_proto);
        unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */

fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);