unix: convert printks to pr_<level>
[deliverable/linux.git] / net / unix / af_unix.c
CommitLineData
1da177e4
LT
1/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
113aa838 4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
1da177e4
LT
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
1da177e4
LT
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetosv : Repaired (I hope) bugs introduces
33 * by above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * is been reached. This won't break
37 * old apps and it will avoid huge amount
38 * of socks hashed (this for unix_gc()
39 * performances reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
82
5cc208be 83#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
84
1da177e4 85#include <linux/module.h>
1da177e4 86#include <linux/kernel.h>
1da177e4
LT
87#include <linux/signal.h>
88#include <linux/sched.h>
89#include <linux/errno.h>
90#include <linux/string.h>
91#include <linux/stat.h>
92#include <linux/dcache.h>
93#include <linux/namei.h>
94#include <linux/socket.h>
95#include <linux/un.h>
96#include <linux/fcntl.h>
97#include <linux/termios.h>
98#include <linux/sockios.h>
99#include <linux/net.h>
100#include <linux/in.h>
101#include <linux/fs.h>
102#include <linux/slab.h>
103#include <asm/uaccess.h>
104#include <linux/skbuff.h>
105#include <linux/netdevice.h>
457c4cbc 106#include <net/net_namespace.h>
1da177e4 107#include <net/sock.h>
c752f073 108#include <net/tcp_states.h>
1da177e4
LT
109#include <net/af_unix.h>
110#include <linux/proc_fs.h>
111#include <linux/seq_file.h>
112#include <net/scm.h>
113#include <linux/init.h>
114#include <linux/poll.h>
1da177e4
LT
115#include <linux/rtnetlink.h>
116#include <linux/mount.h>
117#include <net/checksum.h>
118#include <linux/security.h>
2b15af6f 119#include <linux/freezer.h>
1da177e4 120
7123aaa3 121struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
fa7ff56f
PE
122EXPORT_SYMBOL_GPL(unix_socket_table);
123DEFINE_SPINLOCK(unix_table_lock);
124EXPORT_SYMBOL_GPL(unix_table_lock);
518de9b3 125static atomic_long_t unix_nr_socks;
1da177e4 126
1da177e4 127
7123aaa3
ED
128static struct hlist_head *unix_sockets_unbound(void *addr)
129{
130 unsigned long hash = (unsigned long)addr;
131
132 hash ^= hash >> 16;
133 hash ^= hash >> 8;
134 hash %= UNIX_HASH_SIZE;
135 return &unix_socket_table[UNIX_HASH_SIZE + hash];
136}
137
138#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
1da177e4 139
877ce7c1 140#ifdef CONFIG_SECURITY_NETWORK
dc49c1f9 141static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
877ce7c1 142{
dc49c1f9 143 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
877ce7c1
CZ
144}
145
146static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
147{
dc49c1f9 148 scm->secid = *UNIXSID(skb);
877ce7c1
CZ
149}
150#else
dc49c1f9 151static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
877ce7c1
CZ
152{ }
153
154static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
155{ }
156#endif /* CONFIG_SECURITY_NETWORK */
157
1da177e4
LT
158/*
159 * SMP locking strategy:
fbe9cc4a 160 * hash table is protected with spinlock unix_table_lock
663717f6 161 * each socket state is protected by separate spin lock.
1da177e4
LT
162 */
163
95c96174 164static inline unsigned int unix_hash_fold(__wsum n)
1da177e4 165{
95c96174
ED
166 unsigned int hash = (__force unsigned int)n;
167
1da177e4
LT
168 hash ^= hash>>16;
169 hash ^= hash>>8;
170 return hash&(UNIX_HASH_SIZE-1);
171}
172
173#define unix_peer(sk) (unix_sk(sk)->peer)
174
175static inline int unix_our_peer(struct sock *sk, struct sock *osk)
176{
177 return unix_peer(osk) == sk;
178}
179
180static inline int unix_may_send(struct sock *sk, struct sock *osk)
181{
6eba6a37 182 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
1da177e4
LT
183}
184
3c73419c
RW
185static inline int unix_recvq_full(struct sock const *sk)
186{
187 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
188}
189
fa7ff56f 190struct sock *unix_peer_get(struct sock *s)
1da177e4
LT
191{
192 struct sock *peer;
193
1c92b4e5 194 unix_state_lock(s);
1da177e4
LT
195 peer = unix_peer(s);
196 if (peer)
197 sock_hold(peer);
1c92b4e5 198 unix_state_unlock(s);
1da177e4
LT
199 return peer;
200}
fa7ff56f 201EXPORT_SYMBOL_GPL(unix_peer_get);
1da177e4
LT
202
203static inline void unix_release_addr(struct unix_address *addr)
204{
205 if (atomic_dec_and_test(&addr->refcnt))
206 kfree(addr);
207}
208
209/*
210 * Check unix socket name:
211 * - should be not zero length.
212 * - if started by not zero, should be NULL terminated (FS object)
213 * - if started by zero, it is abstract name.
214 */
ac7bfa62 215
95c96174 216static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
1da177e4
LT
217{
218 if (len <= sizeof(short) || len > sizeof(*sunaddr))
219 return -EINVAL;
220 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
221 return -EINVAL;
222 if (sunaddr->sun_path[0]) {
223 /*
224 * This may look like an off by one error but it is a bit more
225 * subtle. 108 is the longest valid AF_UNIX path for a binding.
25985edc 226 * sun_path[108] doesn't as such exist. However in kernel space
1da177e4
LT
227 * we are guaranteed that it is a valid memory location in our
228 * kernel address buffer.
229 */
e27dfcea 230 ((char *)sunaddr)[len] = 0;
1da177e4
LT
231 len = strlen(sunaddr->sun_path)+1+sizeof(short);
232 return len;
233 }
234
07f0757a 235 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
1da177e4
LT
236 return len;
237}
238
239static void __unix_remove_socket(struct sock *sk)
240{
241 sk_del_node_init(sk);
242}
243
244static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
245{
547b792c 246 WARN_ON(!sk_unhashed(sk));
1da177e4
LT
247 sk_add_node(sk, list);
248}
249
250static inline void unix_remove_socket(struct sock *sk)
251{
fbe9cc4a 252 spin_lock(&unix_table_lock);
1da177e4 253 __unix_remove_socket(sk);
fbe9cc4a 254 spin_unlock(&unix_table_lock);
1da177e4
LT
255}
256
257static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
258{
fbe9cc4a 259 spin_lock(&unix_table_lock);
1da177e4 260 __unix_insert_socket(list, sk);
fbe9cc4a 261 spin_unlock(&unix_table_lock);
1da177e4
LT
262}
263
097e66c5
DL
264static struct sock *__unix_find_socket_byname(struct net *net,
265 struct sockaddr_un *sunname,
95c96174 266 int len, int type, unsigned int hash)
1da177e4
LT
267{
268 struct sock *s;
1da177e4 269
b67bfe0d 270 sk_for_each(s, &unix_socket_table[hash ^ type]) {
1da177e4
LT
271 struct unix_sock *u = unix_sk(s);
272
878628fb 273 if (!net_eq(sock_net(s), net))
097e66c5
DL
274 continue;
275
1da177e4
LT
276 if (u->addr->len == len &&
277 !memcmp(u->addr->name, sunname, len))
278 goto found;
279 }
280 s = NULL;
281found:
282 return s;
283}
284
097e66c5
DL
285static inline struct sock *unix_find_socket_byname(struct net *net,
286 struct sockaddr_un *sunname,
1da177e4 287 int len, int type,
95c96174 288 unsigned int hash)
1da177e4
LT
289{
290 struct sock *s;
291
fbe9cc4a 292 spin_lock(&unix_table_lock);
097e66c5 293 s = __unix_find_socket_byname(net, sunname, len, type, hash);
1da177e4
LT
294 if (s)
295 sock_hold(s);
fbe9cc4a 296 spin_unlock(&unix_table_lock);
1da177e4
LT
297 return s;
298}
299
6616f788 300static struct sock *unix_find_socket_byinode(struct inode *i)
1da177e4
LT
301{
302 struct sock *s;
1da177e4 303
fbe9cc4a 304 spin_lock(&unix_table_lock);
b67bfe0d 305 sk_for_each(s,
1da177e4 306 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
40ffe67d 307 struct dentry *dentry = unix_sk(s)->path.dentry;
1da177e4 308
6eba6a37 309 if (dentry && dentry->d_inode == i) {
1da177e4
LT
310 sock_hold(s);
311 goto found;
312 }
313 }
314 s = NULL;
315found:
fbe9cc4a 316 spin_unlock(&unix_table_lock);
1da177e4
LT
317 return s;
318}
319
320static inline int unix_writable(struct sock *sk)
321{
322 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
323}
324
325static void unix_write_space(struct sock *sk)
326{
43815482
ED
327 struct socket_wq *wq;
328
329 rcu_read_lock();
1da177e4 330 if (unix_writable(sk)) {
43815482
ED
331 wq = rcu_dereference(sk->sk_wq);
332 if (wq_has_sleeper(wq))
67426b75
ED
333 wake_up_interruptible_sync_poll(&wq->wait,
334 POLLOUT | POLLWRNORM | POLLWRBAND);
8d8ad9d7 335 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
1da177e4 336 }
43815482 337 rcu_read_unlock();
1da177e4
LT
338}
339
340/* When dgram socket disconnects (or changes its peer), we clear its receive
341 * queue of packets arrived from previous peer. First, it allows to do
342 * flow control based only on wmem_alloc; second, sk connected to peer
343 * may receive messages only from that peer. */
344static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
345{
b03efcfb 346 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1da177e4
LT
347 skb_queue_purge(&sk->sk_receive_queue);
348 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
349
350 /* If one link of bidirectional dgram pipe is disconnected,
351 * we signal error. Messages are lost. Do not make this,
352 * when peer was not connected to us.
353 */
354 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
355 other->sk_err = ECONNRESET;
356 other->sk_error_report(other);
357 }
358 }
359}
360
361static void unix_sock_destructor(struct sock *sk)
362{
363 struct unix_sock *u = unix_sk(sk);
364
365 skb_queue_purge(&sk->sk_receive_queue);
366
547b792c
IJ
367 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
368 WARN_ON(!sk_unhashed(sk));
369 WARN_ON(sk->sk_socket);
1da177e4 370 if (!sock_flag(sk, SOCK_DEAD)) {
5cc208be 371 pr_info("Attempt to release alive unix socket: %p\n", sk);
1da177e4
LT
372 return;
373 }
374
375 if (u->addr)
376 unix_release_addr(u->addr);
377
518de9b3 378 atomic_long_dec(&unix_nr_socks);
6f756a8c 379 local_bh_disable();
a8076d8d 380 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
6f756a8c 381 local_bh_enable();
1da177e4 382#ifdef UNIX_REFCNT_DEBUG
5cc208be 383 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
518de9b3 384 atomic_long_read(&unix_nr_socks));
1da177e4
LT
385#endif
386}
387
ded34e0f 388static void unix_release_sock(struct sock *sk, int embrion)
1da177e4
LT
389{
390 struct unix_sock *u = unix_sk(sk);
40ffe67d 391 struct path path;
1da177e4
LT
392 struct sock *skpair;
393 struct sk_buff *skb;
394 int state;
395
396 unix_remove_socket(sk);
397
398 /* Clear state */
1c92b4e5 399 unix_state_lock(sk);
1da177e4
LT
400 sock_orphan(sk);
401 sk->sk_shutdown = SHUTDOWN_MASK;
40ffe67d
AV
402 path = u->path;
403 u->path.dentry = NULL;
404 u->path.mnt = NULL;
1da177e4
LT
405 state = sk->sk_state;
406 sk->sk_state = TCP_CLOSE;
1c92b4e5 407 unix_state_unlock(sk);
1da177e4
LT
408
409 wake_up_interruptible_all(&u->peer_wait);
410
e27dfcea 411 skpair = unix_peer(sk);
1da177e4 412
e27dfcea 413 if (skpair != NULL) {
1da177e4 414 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
1c92b4e5 415 unix_state_lock(skpair);
1da177e4
LT
416 /* No more writes */
417 skpair->sk_shutdown = SHUTDOWN_MASK;
418 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
419 skpair->sk_err = ECONNRESET;
1c92b4e5 420 unix_state_unlock(skpair);
1da177e4 421 skpair->sk_state_change(skpair);
8d8ad9d7 422 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
1da177e4
LT
423 }
424 sock_put(skpair); /* It may now die */
425 unix_peer(sk) = NULL;
426 }
427
428 /* Try to flush out this socket. Throw out buffers at least */
429
430 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
e27dfcea 431 if (state == TCP_LISTEN)
1da177e4
LT
432 unix_release_sock(skb->sk, 1);
433 /* passed fds are erased in the kfree_skb hook */
434 kfree_skb(skb);
435 }
436
40ffe67d
AV
437 if (path.dentry)
438 path_put(&path);
1da177e4
LT
439
440 sock_put(sk);
441
442 /* ---- Socket is dead now and most probably destroyed ---- */
443
444 /*
e04dae84 445 * Fixme: BSD difference: In BSD all sockets connected to us get
1da177e4
LT
446 * ECONNRESET and we die on the spot. In Linux we behave
447 * like files and pipes do and wait for the last
448 * dereference.
449 *
450 * Can't we simply set sock->err?
451 *
452 * What the above comment does talk about? --ANK(980817)
453 */
454
9305cfa4 455 if (unix_tot_inflight)
ac7bfa62 456 unix_gc(); /* Garbage collect fds */
1da177e4
LT
457}
458
109f6e39
EB
459static void init_peercred(struct sock *sk)
460{
461 put_pid(sk->sk_peer_pid);
462 if (sk->sk_peer_cred)
463 put_cred(sk->sk_peer_cred);
464 sk->sk_peer_pid = get_pid(task_tgid(current));
465 sk->sk_peer_cred = get_current_cred();
466}
467
468static void copy_peercred(struct sock *sk, struct sock *peersk)
469{
470 put_pid(sk->sk_peer_pid);
471 if (sk->sk_peer_cred)
472 put_cred(sk->sk_peer_cred);
473 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
474 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
475}
476
1da177e4
LT
477static int unix_listen(struct socket *sock, int backlog)
478{
479 int err;
480 struct sock *sk = sock->sk;
481 struct unix_sock *u = unix_sk(sk);
109f6e39 482 struct pid *old_pid = NULL;
1da177e4
LT
483
484 err = -EOPNOTSUPP;
6eba6a37
ED
485 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
486 goto out; /* Only stream/seqpacket sockets accept */
1da177e4
LT
487 err = -EINVAL;
488 if (!u->addr)
6eba6a37 489 goto out; /* No listens on an unbound socket */
1c92b4e5 490 unix_state_lock(sk);
1da177e4
LT
491 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
492 goto out_unlock;
493 if (backlog > sk->sk_max_ack_backlog)
494 wake_up_interruptible_all(&u->peer_wait);
495 sk->sk_max_ack_backlog = backlog;
496 sk->sk_state = TCP_LISTEN;
497 /* set credentials so connect can copy them */
109f6e39 498 init_peercred(sk);
1da177e4
LT
499 err = 0;
500
501out_unlock:
1c92b4e5 502 unix_state_unlock(sk);
109f6e39 503 put_pid(old_pid);
1da177e4
LT
504out:
505 return err;
506}
507
508static int unix_release(struct socket *);
509static int unix_bind(struct socket *, struct sockaddr *, int);
510static int unix_stream_connect(struct socket *, struct sockaddr *,
511 int addr_len, int flags);
512static int unix_socketpair(struct socket *, struct socket *);
513static int unix_accept(struct socket *, struct socket *, int);
514static int unix_getname(struct socket *, struct sockaddr *, int *, int);
515static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
ec0d215f
RW
516static unsigned int unix_dgram_poll(struct file *, struct socket *,
517 poll_table *);
1da177e4
LT
518static int unix_ioctl(struct socket *, unsigned int, unsigned long);
519static int unix_shutdown(struct socket *, int);
520static int unix_stream_sendmsg(struct kiocb *, struct socket *,
521 struct msghdr *, size_t);
522static int unix_stream_recvmsg(struct kiocb *, struct socket *,
523 struct msghdr *, size_t, int);
524static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
525 struct msghdr *, size_t);
526static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
527 struct msghdr *, size_t, int);
528static int unix_dgram_connect(struct socket *, struct sockaddr *,
529 int, int);
530static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
531 struct msghdr *, size_t);
a05d2ad1
EB
532static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
533 struct msghdr *, size_t, int);
1da177e4 534
f55bb7f9
PE
535static void unix_set_peek_off(struct sock *sk, int val)
536{
537 struct unix_sock *u = unix_sk(sk);
538
539 mutex_lock(&u->readlock);
540 sk->sk_peek_off = val;
541 mutex_unlock(&u->readlock);
542}
543
544
90ddc4f0 545static const struct proto_ops unix_stream_ops = {
1da177e4
LT
546 .family = PF_UNIX,
547 .owner = THIS_MODULE,
548 .release = unix_release,
549 .bind = unix_bind,
550 .connect = unix_stream_connect,
551 .socketpair = unix_socketpair,
552 .accept = unix_accept,
553 .getname = unix_getname,
554 .poll = unix_poll,
555 .ioctl = unix_ioctl,
556 .listen = unix_listen,
557 .shutdown = unix_shutdown,
558 .setsockopt = sock_no_setsockopt,
559 .getsockopt = sock_no_getsockopt,
560 .sendmsg = unix_stream_sendmsg,
561 .recvmsg = unix_stream_recvmsg,
562 .mmap = sock_no_mmap,
563 .sendpage = sock_no_sendpage,
fc0d7536 564 .set_peek_off = unix_set_peek_off,
1da177e4
LT
565};
566
90ddc4f0 567static const struct proto_ops unix_dgram_ops = {
1da177e4
LT
568 .family = PF_UNIX,
569 .owner = THIS_MODULE,
570 .release = unix_release,
571 .bind = unix_bind,
572 .connect = unix_dgram_connect,
573 .socketpair = unix_socketpair,
574 .accept = sock_no_accept,
575 .getname = unix_getname,
ec0d215f 576 .poll = unix_dgram_poll,
1da177e4
LT
577 .ioctl = unix_ioctl,
578 .listen = sock_no_listen,
579 .shutdown = unix_shutdown,
580 .setsockopt = sock_no_setsockopt,
581 .getsockopt = sock_no_getsockopt,
582 .sendmsg = unix_dgram_sendmsg,
583 .recvmsg = unix_dgram_recvmsg,
584 .mmap = sock_no_mmap,
585 .sendpage = sock_no_sendpage,
f55bb7f9 586 .set_peek_off = unix_set_peek_off,
1da177e4
LT
587};
588
90ddc4f0 589static const struct proto_ops unix_seqpacket_ops = {
1da177e4
LT
590 .family = PF_UNIX,
591 .owner = THIS_MODULE,
592 .release = unix_release,
593 .bind = unix_bind,
594 .connect = unix_stream_connect,
595 .socketpair = unix_socketpair,
596 .accept = unix_accept,
597 .getname = unix_getname,
ec0d215f 598 .poll = unix_dgram_poll,
1da177e4
LT
599 .ioctl = unix_ioctl,
600 .listen = unix_listen,
601 .shutdown = unix_shutdown,
602 .setsockopt = sock_no_setsockopt,
603 .getsockopt = sock_no_getsockopt,
604 .sendmsg = unix_seqpacket_sendmsg,
a05d2ad1 605 .recvmsg = unix_seqpacket_recvmsg,
1da177e4
LT
606 .mmap = sock_no_mmap,
607 .sendpage = sock_no_sendpage,
f55bb7f9 608 .set_peek_off = unix_set_peek_off,
1da177e4
LT
609};
610
611static struct proto unix_proto = {
248969ae
ED
612 .name = "UNIX",
613 .owner = THIS_MODULE,
248969ae 614 .obj_size = sizeof(struct unix_sock),
1da177e4
LT
615};
616
a09785a2
IM
617/*
618 * AF_UNIX sockets do not interact with hardware, hence they
619 * dont trigger interrupts - so it's safe for them to have
620 * bh-unsafe locking for their sk_receive_queue.lock. Split off
621 * this special lock-class by reinitializing the spinlock key:
622 */
623static struct lock_class_key af_unix_sk_receive_queue_lock_key;
624
6eba6a37 625static struct sock *unix_create1(struct net *net, struct socket *sock)
1da177e4
LT
626{
627 struct sock *sk = NULL;
628 struct unix_sock *u;
629
518de9b3
ED
630 atomic_long_inc(&unix_nr_socks);
631 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
1da177e4
LT
632 goto out;
633
6257ff21 634 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
1da177e4
LT
635 if (!sk)
636 goto out;
637
6eba6a37 638 sock_init_data(sock, sk);
a09785a2
IM
639 lockdep_set_class(&sk->sk_receive_queue.lock,
640 &af_unix_sk_receive_queue_lock_key);
1da177e4
LT
641
642 sk->sk_write_space = unix_write_space;
a0a53c8b 643 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
1da177e4
LT
644 sk->sk_destruct = unix_sock_destructor;
645 u = unix_sk(sk);
40ffe67d
AV
646 u->path.dentry = NULL;
647 u->path.mnt = NULL;
fd19f329 648 spin_lock_init(&u->lock);
516e0cc5 649 atomic_long_set(&u->inflight, 0);
1fd05ba5 650 INIT_LIST_HEAD(&u->link);
57b47a53 651 mutex_init(&u->readlock); /* single task reading lock */
1da177e4 652 init_waitqueue_head(&u->peer_wait);
7123aaa3 653 unix_insert_socket(unix_sockets_unbound(sk), sk);
1da177e4 654out:
284b327b 655 if (sk == NULL)
518de9b3 656 atomic_long_dec(&unix_nr_socks);
920de804
ED
657 else {
658 local_bh_disable();
a8076d8d 659 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
920de804
ED
660 local_bh_enable();
661 }
1da177e4
LT
662 return sk;
663}
664
3f378b68
EP
665static int unix_create(struct net *net, struct socket *sock, int protocol,
666 int kern)
1da177e4
LT
667{
668 if (protocol && protocol != PF_UNIX)
669 return -EPROTONOSUPPORT;
670
671 sock->state = SS_UNCONNECTED;
672
673 switch (sock->type) {
674 case SOCK_STREAM:
675 sock->ops = &unix_stream_ops;
676 break;
677 /*
678 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
679 * nothing uses it.
680 */
681 case SOCK_RAW:
e27dfcea 682 sock->type = SOCK_DGRAM;
1da177e4
LT
683 case SOCK_DGRAM:
684 sock->ops = &unix_dgram_ops;
685 break;
686 case SOCK_SEQPACKET:
687 sock->ops = &unix_seqpacket_ops;
688 break;
689 default:
690 return -ESOCKTNOSUPPORT;
691 }
692
1b8d7ae4 693 return unix_create1(net, sock) ? 0 : -ENOMEM;
1da177e4
LT
694}
695
696static int unix_release(struct socket *sock)
697{
698 struct sock *sk = sock->sk;
699
700 if (!sk)
701 return 0;
702
ded34e0f 703 unix_release_sock(sk, 0);
1da177e4
LT
704 sock->sk = NULL;
705
ded34e0f 706 return 0;
1da177e4
LT
707}
708
709static int unix_autobind(struct socket *sock)
710{
711 struct sock *sk = sock->sk;
3b1e0a65 712 struct net *net = sock_net(sk);
1da177e4
LT
713 struct unix_sock *u = unix_sk(sk);
714 static u32 ordernum = 1;
6eba6a37 715 struct unix_address *addr;
1da177e4 716 int err;
8df73ff9 717 unsigned int retries = 0;
1da177e4 718
57b47a53 719 mutex_lock(&u->readlock);
1da177e4
LT
720
721 err = 0;
722 if (u->addr)
723 goto out;
724
725 err = -ENOMEM;
0da974f4 726 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
1da177e4
LT
727 if (!addr)
728 goto out;
729
1da177e4
LT
730 addr->name->sun_family = AF_UNIX;
731 atomic_set(&addr->refcnt, 1);
732
733retry:
734 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
07f0757a 735 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
1da177e4 736
fbe9cc4a 737 spin_lock(&unix_table_lock);
1da177e4
LT
738 ordernum = (ordernum+1)&0xFFFFF;
739
097e66c5 740 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
1da177e4 741 addr->hash)) {
fbe9cc4a 742 spin_unlock(&unix_table_lock);
8df73ff9
TH
743 /*
744 * __unix_find_socket_byname() may take long time if many names
745 * are already in use.
746 */
747 cond_resched();
748 /* Give up if all names seems to be in use. */
749 if (retries++ == 0xFFFFF) {
750 err = -ENOSPC;
751 kfree(addr);
752 goto out;
753 }
1da177e4
LT
754 goto retry;
755 }
756 addr->hash ^= sk->sk_type;
757
758 __unix_remove_socket(sk);
759 u->addr = addr;
760 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
fbe9cc4a 761 spin_unlock(&unix_table_lock);
1da177e4
LT
762 err = 0;
763
57b47a53 764out: mutex_unlock(&u->readlock);
1da177e4
LT
765 return err;
766}
767
097e66c5
DL
768static struct sock *unix_find_other(struct net *net,
769 struct sockaddr_un *sunname, int len,
95c96174 770 int type, unsigned int hash, int *error)
1da177e4
LT
771{
772 struct sock *u;
421748ec 773 struct path path;
1da177e4 774 int err = 0;
ac7bfa62 775
1da177e4 776 if (sunname->sun_path[0]) {
421748ec
AV
777 struct inode *inode;
778 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
1da177e4
LT
779 if (err)
780 goto fail;
421748ec
AV
781 inode = path.dentry->d_inode;
782 err = inode_permission(inode, MAY_WRITE);
1da177e4
LT
783 if (err)
784 goto put_fail;
785
786 err = -ECONNREFUSED;
421748ec 787 if (!S_ISSOCK(inode->i_mode))
1da177e4 788 goto put_fail;
6616f788 789 u = unix_find_socket_byinode(inode);
1da177e4
LT
790 if (!u)
791 goto put_fail;
792
793 if (u->sk_type == type)
68ac1234 794 touch_atime(&path);
1da177e4 795
421748ec 796 path_put(&path);
1da177e4 797
e27dfcea 798 err = -EPROTOTYPE;
1da177e4
LT
799 if (u->sk_type != type) {
800 sock_put(u);
801 goto fail;
802 }
803 } else {
804 err = -ECONNREFUSED;
e27dfcea 805 u = unix_find_socket_byname(net, sunname, len, type, hash);
1da177e4
LT
806 if (u) {
807 struct dentry *dentry;
40ffe67d 808 dentry = unix_sk(u)->path.dentry;
1da177e4 809 if (dentry)
68ac1234 810 touch_atime(&unix_sk(u)->path);
1da177e4
LT
811 } else
812 goto fail;
813 }
814 return u;
815
816put_fail:
421748ec 817 path_put(&path);
1da177e4 818fail:
e27dfcea 819 *error = err;
1da177e4
LT
820 return NULL;
821}
822
faf02010
AV
823static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
824{
825 struct dentry *dentry;
826 struct path path;
827 int err = 0;
828 /*
829 * Get the parent directory, calculate the hash for last
830 * component.
831 */
832 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
833 err = PTR_ERR(dentry);
834 if (IS_ERR(dentry))
835 return err;
836
837 /*
838 * All right, let's create it.
839 */
840 err = security_path_mknod(&path, dentry, mode, 0);
841 if (!err) {
842 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
843 if (!err) {
844 res->mnt = mntget(path.mnt);
845 res->dentry = dget(dentry);
846 }
847 }
848 done_path_create(&path, dentry);
849 return err;
850}
1da177e4
LT
851
852static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
853{
854 struct sock *sk = sock->sk;
3b1e0a65 855 struct net *net = sock_net(sk);
1da177e4 856 struct unix_sock *u = unix_sk(sk);
e27dfcea 857 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
dae6ad8f 858 char *sun_path = sunaddr->sun_path;
1da177e4 859 int err;
95c96174 860 unsigned int hash;
1da177e4
LT
861 struct unix_address *addr;
862 struct hlist_head *list;
863
864 err = -EINVAL;
865 if (sunaddr->sun_family != AF_UNIX)
866 goto out;
867
e27dfcea 868 if (addr_len == sizeof(short)) {
1da177e4
LT
869 err = unix_autobind(sock);
870 goto out;
871 }
872
873 err = unix_mkname(sunaddr, addr_len, &hash);
874 if (err < 0)
875 goto out;
876 addr_len = err;
877
57b47a53 878 mutex_lock(&u->readlock);
1da177e4
LT
879
880 err = -EINVAL;
881 if (u->addr)
882 goto out_up;
883
884 err = -ENOMEM;
885 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
886 if (!addr)
887 goto out_up;
888
889 memcpy(addr->name, sunaddr, addr_len);
890 addr->len = addr_len;
891 addr->hash = hash ^ sk->sk_type;
892 atomic_set(&addr->refcnt, 1);
893
dae6ad8f 894 if (sun_path[0]) {
faf02010
AV
895 struct path path;
896 umode_t mode = S_IFSOCK |
ce3b0f8d 897 (SOCK_INODE(sock)->i_mode & ~current_umask());
faf02010
AV
898 err = unix_mknod(sun_path, mode, &path);
899 if (err) {
900 if (err == -EEXIST)
901 err = -EADDRINUSE;
902 unix_release_addr(addr);
903 goto out_up;
904 }
1da177e4 905 addr->hash = UNIX_HASH_SIZE;
faf02010
AV
906 hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
907 spin_lock(&unix_table_lock);
908 u->path = path;
909 list = &unix_socket_table[hash];
910 } else {
911 spin_lock(&unix_table_lock);
1da177e4 912 err = -EADDRINUSE;
097e66c5 913 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1da177e4
LT
914 sk->sk_type, hash)) {
915 unix_release_addr(addr);
916 goto out_unlock;
917 }
918
919 list = &unix_socket_table[addr->hash];
1da177e4
LT
920 }
921
922 err = 0;
923 __unix_remove_socket(sk);
924 u->addr = addr;
925 __unix_insert_socket(list, sk);
926
927out_unlock:
fbe9cc4a 928 spin_unlock(&unix_table_lock);
1da177e4 929out_up:
57b47a53 930 mutex_unlock(&u->readlock);
1da177e4
LT
931out:
932 return err;
1da177e4
LT
933}
934
278a3de5
DM
935static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
936{
937 if (unlikely(sk1 == sk2) || !sk2) {
938 unix_state_lock(sk1);
939 return;
940 }
941 if (sk1 < sk2) {
942 unix_state_lock(sk1);
943 unix_state_lock_nested(sk2);
944 } else {
945 unix_state_lock(sk2);
946 unix_state_lock_nested(sk1);
947 }
948}
949
950static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
951{
952 if (unlikely(sk1 == sk2) || !sk2) {
953 unix_state_unlock(sk1);
954 return;
955 }
956 unix_state_unlock(sk1);
957 unix_state_unlock(sk2);
958}
959
1da177e4
LT
960static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
961 int alen, int flags)
962{
963 struct sock *sk = sock->sk;
3b1e0a65 964 struct net *net = sock_net(sk);
e27dfcea 965 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1da177e4 966 struct sock *other;
95c96174 967 unsigned int hash;
1da177e4
LT
968 int err;
969
970 if (addr->sa_family != AF_UNSPEC) {
971 err = unix_mkname(sunaddr, alen, &hash);
972 if (err < 0)
973 goto out;
974 alen = err;
975
976 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
977 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
978 goto out;
979
278a3de5 980restart:
e27dfcea 981 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1da177e4
LT
982 if (!other)
983 goto out;
984
278a3de5
DM
985 unix_state_double_lock(sk, other);
986
987 /* Apparently VFS overslept socket death. Retry. */
988 if (sock_flag(other, SOCK_DEAD)) {
989 unix_state_double_unlock(sk, other);
990 sock_put(other);
991 goto restart;
992 }
1da177e4
LT
993
994 err = -EPERM;
995 if (!unix_may_send(sk, other))
996 goto out_unlock;
997
998 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
999 if (err)
1000 goto out_unlock;
1001
1002 } else {
1003 /*
1004 * 1003.1g breaking connected state with AF_UNSPEC
1005 */
1006 other = NULL;
278a3de5 1007 unix_state_double_lock(sk, other);
1da177e4
LT
1008 }
1009
1010 /*
1011 * If it was connected, reconnect.
1012 */
1013 if (unix_peer(sk)) {
1014 struct sock *old_peer = unix_peer(sk);
e27dfcea 1015 unix_peer(sk) = other;
278a3de5 1016 unix_state_double_unlock(sk, other);
1da177e4
LT
1017
1018 if (other != old_peer)
1019 unix_dgram_disconnected(sk, old_peer);
1020 sock_put(old_peer);
1021 } else {
e27dfcea 1022 unix_peer(sk) = other;
278a3de5 1023 unix_state_double_unlock(sk, other);
1da177e4 1024 }
ac7bfa62 1025 return 0;
1da177e4
LT
1026
1027out_unlock:
278a3de5 1028 unix_state_double_unlock(sk, other);
1da177e4
LT
1029 sock_put(other);
1030out:
1031 return err;
1032}
1033
1034static long unix_wait_for_peer(struct sock *other, long timeo)
1035{
1036 struct unix_sock *u = unix_sk(other);
1037 int sched;
1038 DEFINE_WAIT(wait);
1039
1040 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1041
1042 sched = !sock_flag(other, SOCK_DEAD) &&
1043 !(other->sk_shutdown & RCV_SHUTDOWN) &&
3c73419c 1044 unix_recvq_full(other);
1da177e4 1045
1c92b4e5 1046 unix_state_unlock(other);
1da177e4
LT
1047
1048 if (sched)
1049 timeo = schedule_timeout(timeo);
1050
1051 finish_wait(&u->peer_wait, &wait);
1052 return timeo;
1053}
1054
1055static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1056 int addr_len, int flags)
1057{
e27dfcea 1058 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1da177e4 1059 struct sock *sk = sock->sk;
3b1e0a65 1060 struct net *net = sock_net(sk);
1da177e4
LT
1061 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1062 struct sock *newsk = NULL;
1063 struct sock *other = NULL;
1064 struct sk_buff *skb = NULL;
95c96174 1065 unsigned int hash;
1da177e4
LT
1066 int st;
1067 int err;
1068 long timeo;
1069
1070 err = unix_mkname(sunaddr, addr_len, &hash);
1071 if (err < 0)
1072 goto out;
1073 addr_len = err;
1074
f64f9e71
JP
1075 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1076 (err = unix_autobind(sock)) != 0)
1da177e4
LT
1077 goto out;
1078
1079 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1080
1081 /* First of all allocate resources.
1082 If we will make it after state is locked,
1083 we will have to recheck all again in any case.
1084 */
1085
1086 err = -ENOMEM;
1087
1088 /* create new sock for complete connection */
3b1e0a65 1089 newsk = unix_create1(sock_net(sk), NULL);
1da177e4
LT
1090 if (newsk == NULL)
1091 goto out;
1092
1093 /* Allocate skb for sending to listening sock */
1094 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1095 if (skb == NULL)
1096 goto out;
1097
1098restart:
1099 /* Find listening sock. */
097e66c5 1100 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1da177e4
LT
1101 if (!other)
1102 goto out;
1103
1104 /* Latch state of peer */
1c92b4e5 1105 unix_state_lock(other);
1da177e4
LT
1106
1107 /* Apparently VFS overslept socket death. Retry. */
1108 if (sock_flag(other, SOCK_DEAD)) {
1c92b4e5 1109 unix_state_unlock(other);
1da177e4
LT
1110 sock_put(other);
1111 goto restart;
1112 }
1113
1114 err = -ECONNREFUSED;
1115 if (other->sk_state != TCP_LISTEN)
1116 goto out_unlock;
77238f2b
TS
1117 if (other->sk_shutdown & RCV_SHUTDOWN)
1118 goto out_unlock;
1da177e4 1119
3c73419c 1120 if (unix_recvq_full(other)) {
1da177e4
LT
1121 err = -EAGAIN;
1122 if (!timeo)
1123 goto out_unlock;
1124
1125 timeo = unix_wait_for_peer(other, timeo);
1126
1127 err = sock_intr_errno(timeo);
1128 if (signal_pending(current))
1129 goto out;
1130 sock_put(other);
1131 goto restart;
ac7bfa62 1132 }
1da177e4
LT
1133
1134 /* Latch our state.
1135
e5537bfc 1136 It is tricky place. We need to grab our state lock and cannot
1da177e4
LT
1137 drop lock on peer. It is dangerous because deadlock is
1138 possible. Connect to self case and simultaneous
1139 attempt to connect are eliminated by checking socket
1140 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1141 check this before attempt to grab lock.
1142
1143 Well, and we have to recheck the state after socket locked.
1144 */
1145 st = sk->sk_state;
1146
1147 switch (st) {
1148 case TCP_CLOSE:
1149 /* This is ok... continue with connect */
1150 break;
1151 case TCP_ESTABLISHED:
1152 /* Socket is already connected */
1153 err = -EISCONN;
1154 goto out_unlock;
1155 default:
1156 err = -EINVAL;
1157 goto out_unlock;
1158 }
1159
1c92b4e5 1160 unix_state_lock_nested(sk);
1da177e4
LT
1161
1162 if (sk->sk_state != st) {
1c92b4e5
DM
1163 unix_state_unlock(sk);
1164 unix_state_unlock(other);
1da177e4
LT
1165 sock_put(other);
1166 goto restart;
1167 }
1168
3610cda5 1169 err = security_unix_stream_connect(sk, other, newsk);
1da177e4 1170 if (err) {
1c92b4e5 1171 unix_state_unlock(sk);
1da177e4
LT
1172 goto out_unlock;
1173 }
1174
1175 /* The way is open! Fastly set all the necessary fields... */
1176
1177 sock_hold(sk);
1178 unix_peer(newsk) = sk;
1179 newsk->sk_state = TCP_ESTABLISHED;
1180 newsk->sk_type = sk->sk_type;
109f6e39 1181 init_peercred(newsk);
1da177e4 1182 newu = unix_sk(newsk);
eaefd110 1183 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1da177e4
LT
1184 otheru = unix_sk(other);
1185
1186 /* copy address information from listening to new sock*/
1187 if (otheru->addr) {
1188 atomic_inc(&otheru->addr->refcnt);
1189 newu->addr = otheru->addr;
1190 }
40ffe67d
AV
1191 if (otheru->path.dentry) {
1192 path_get(&otheru->path);
1193 newu->path = otheru->path;
1da177e4
LT
1194 }
1195
1196 /* Set credentials */
109f6e39 1197 copy_peercred(sk, other);
1da177e4 1198
1da177e4
LT
1199 sock->state = SS_CONNECTED;
1200 sk->sk_state = TCP_ESTABLISHED;
830a1e5c
BL
1201 sock_hold(newsk);
1202
1203 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1204 unix_peer(sk) = newsk;
1da177e4 1205
1c92b4e5 1206 unix_state_unlock(sk);
1da177e4
LT
1207
1208 /* take ten and and send info to listening sock */
1209 spin_lock(&other->sk_receive_queue.lock);
1210 __skb_queue_tail(&other->sk_receive_queue, skb);
1da177e4 1211 spin_unlock(&other->sk_receive_queue.lock);
1c92b4e5 1212 unix_state_unlock(other);
1da177e4
LT
1213 other->sk_data_ready(other, 0);
1214 sock_put(other);
1215 return 0;
1216
1217out_unlock:
1218 if (other)
1c92b4e5 1219 unix_state_unlock(other);
1da177e4
LT
1220
1221out:
40d44446 1222 kfree_skb(skb);
1da177e4
LT
1223 if (newsk)
1224 unix_release_sock(newsk, 0);
1225 if (other)
1226 sock_put(other);
1227 return err;
1228}
1229
1230static int unix_socketpair(struct socket *socka, struct socket *sockb)
1231{
e27dfcea 1232 struct sock *ska = socka->sk, *skb = sockb->sk;
1da177e4
LT
1233
1234 /* Join our sockets back to back */
1235 sock_hold(ska);
1236 sock_hold(skb);
e27dfcea
JK
1237 unix_peer(ska) = skb;
1238 unix_peer(skb) = ska;
109f6e39
EB
1239 init_peercred(ska);
1240 init_peercred(skb);
1da177e4
LT
1241
1242 if (ska->sk_type != SOCK_DGRAM) {
1243 ska->sk_state = TCP_ESTABLISHED;
1244 skb->sk_state = TCP_ESTABLISHED;
1245 socka->state = SS_CONNECTED;
1246 sockb->state = SS_CONNECTED;
1247 }
1248 return 0;
1249}
1250
90c6bd34
DB
1251static void unix_sock_inherit_flags(const struct socket *old,
1252 struct socket *new)
1253{
1254 if (test_bit(SOCK_PASSCRED, &old->flags))
1255 set_bit(SOCK_PASSCRED, &new->flags);
1256 if (test_bit(SOCK_PASSSEC, &old->flags))
1257 set_bit(SOCK_PASSSEC, &new->flags);
1258}
1259
1da177e4
LT
1260static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1261{
1262 struct sock *sk = sock->sk;
1263 struct sock *tsk;
1264 struct sk_buff *skb;
1265 int err;
1266
1267 err = -EOPNOTSUPP;
6eba6a37 1268 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1da177e4
LT
1269 goto out;
1270
1271 err = -EINVAL;
1272 if (sk->sk_state != TCP_LISTEN)
1273 goto out;
1274
1275 /* If socket state is TCP_LISTEN it cannot change (for now...),
1276 * so that no locks are necessary.
1277 */
1278
1279 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1280 if (!skb) {
1281 /* This means receive shutdown. */
1282 if (err == 0)
1283 err = -EINVAL;
1284 goto out;
1285 }
1286
1287 tsk = skb->sk;
1288 skb_free_datagram(sk, skb);
1289 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1290
1291 /* attach accepted sock to socket */
1c92b4e5 1292 unix_state_lock(tsk);
1da177e4 1293 newsock->state = SS_CONNECTED;
90c6bd34 1294 unix_sock_inherit_flags(sock, newsock);
1da177e4 1295 sock_graft(tsk, newsock);
1c92b4e5 1296 unix_state_unlock(tsk);
1da177e4
LT
1297 return 0;
1298
1299out:
1300 return err;
1301}
1302
1303
1304static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1305{
1306 struct sock *sk = sock->sk;
1307 struct unix_sock *u;
13cfa97b 1308 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1da177e4
LT
1309 int err = 0;
1310
1311 if (peer) {
1312 sk = unix_peer_get(sk);
1313
1314 err = -ENOTCONN;
1315 if (!sk)
1316 goto out;
1317 err = 0;
1318 } else {
1319 sock_hold(sk);
1320 }
1321
1322 u = unix_sk(sk);
1c92b4e5 1323 unix_state_lock(sk);
1da177e4
LT
1324 if (!u->addr) {
1325 sunaddr->sun_family = AF_UNIX;
1326 sunaddr->sun_path[0] = 0;
1327 *uaddr_len = sizeof(short);
1328 } else {
1329 struct unix_address *addr = u->addr;
1330
1331 *uaddr_len = addr->len;
1332 memcpy(sunaddr, addr->name, *uaddr_len);
1333 }
1c92b4e5 1334 unix_state_unlock(sk);
1da177e4
LT
1335 sock_put(sk);
1336out:
1337 return err;
1338}
1339
1340static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1341{
1342 int i;
1343
1344 scm->fp = UNIXCB(skb).fp;
1da177e4
LT
1345 UNIXCB(skb).fp = NULL;
1346
6eba6a37 1347 for (i = scm->fp->count-1; i >= 0; i--)
1da177e4
LT
1348 unix_notinflight(scm->fp->fp[i]);
1349}
1350
7361c36c 1351static void unix_destruct_scm(struct sk_buff *skb)
1da177e4
LT
1352{
1353 struct scm_cookie scm;
1354 memset(&scm, 0, sizeof(scm));
7361c36c 1355 scm.pid = UNIXCB(skb).pid;
7361c36c
EB
1356 if (UNIXCB(skb).fp)
1357 unix_detach_fds(&scm, skb);
1da177e4
LT
1358
1359 /* Alas, it calls VFS */
1360 /* So fscking what? fput() had been SMP-safe since the last Summer */
1361 scm_destroy(&scm);
1362 sock_wfree(skb);
1363}
1364
25888e30
ED
1365#define MAX_RECURSION_LEVEL 4
1366
6209344f 1367static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1da177e4
LT
1368{
1369 int i;
25888e30
ED
1370 unsigned char max_level = 0;
1371 int unix_sock_count = 0;
1372
1373 for (i = scm->fp->count - 1; i >= 0; i--) {
1374 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1375
1376 if (sk) {
1377 unix_sock_count++;
1378 max_level = max(max_level,
1379 unix_sk(sk)->recursion_level);
1380 }
1381 }
1382 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1383 return -ETOOMANYREFS;
6209344f
MS
1384
1385 /*
1386 * Need to duplicate file references for the sake of garbage
1387 * collection. Otherwise a socket in the fps might become a
1388 * candidate for GC while the skb is not yet queued.
1389 */
1390 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1391 if (!UNIXCB(skb).fp)
1392 return -ENOMEM;
1393
25888e30
ED
1394 if (unix_sock_count) {
1395 for (i = scm->fp->count - 1; i >= 0; i--)
1396 unix_inflight(scm->fp->fp[i]);
1397 }
1398 return max_level;
1da177e4
LT
1399}
1400
f78a5fda 1401static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
7361c36c
EB
1402{
1403 int err = 0;
16e57262 1404
f78a5fda 1405 UNIXCB(skb).pid = get_pid(scm->pid);
6b0ee8c0
EB
1406 UNIXCB(skb).uid = scm->creds.uid;
1407 UNIXCB(skb).gid = scm->creds.gid;
7361c36c
EB
1408 UNIXCB(skb).fp = NULL;
1409 if (scm->fp && send_fds)
1410 err = unix_attach_fds(scm, skb);
1411
1412 skb->destructor = unix_destruct_scm;
1413 return err;
1414}
1415
16e57262
ED
1416/*
1417 * Some apps rely on write() giving SCM_CREDENTIALS
1418 * We include credentials if source or destination socket
1419 * asserted SOCK_PASSCRED.
1420 */
1421static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1422 const struct sock *other)
1423{
6b0ee8c0 1424 if (UNIXCB(skb).pid)
16e57262
ED
1425 return;
1426 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
25da0e3e
EB
1427 !other->sk_socket ||
1428 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
16e57262 1429 UNIXCB(skb).pid = get_pid(task_tgid(current));
6e0895c2 1430 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
16e57262
ED
1431 }
1432}
1433
1da177e4
LT
1434/*
1435 * Send AF_UNIX data.
1436 */
1437
1438static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1439 struct msghdr *msg, size_t len)
1440{
1441 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1442 struct sock *sk = sock->sk;
3b1e0a65 1443 struct net *net = sock_net(sk);
1da177e4 1444 struct unix_sock *u = unix_sk(sk);
e27dfcea 1445 struct sockaddr_un *sunaddr = msg->msg_name;
1da177e4
LT
1446 struct sock *other = NULL;
1447 int namelen = 0; /* fake GCC */
1448 int err;
95c96174 1449 unsigned int hash;
f78a5fda 1450 struct sk_buff *skb;
1da177e4
LT
1451 long timeo;
1452 struct scm_cookie tmp_scm;
25888e30 1453 int max_level;
eb6a2481 1454 int data_len = 0;
1da177e4
LT
1455
1456 if (NULL == siocb->scm)
1457 siocb->scm = &tmp_scm;
5f23b734 1458 wait_for_unix_gc();
e0e3cea4 1459 err = scm_send(sock, msg, siocb->scm, false);
1da177e4
LT
1460 if (err < 0)
1461 return err;
1462
1463 err = -EOPNOTSUPP;
1464 if (msg->msg_flags&MSG_OOB)
1465 goto out;
1466
1467 if (msg->msg_namelen) {
1468 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1469 if (err < 0)
1470 goto out;
1471 namelen = err;
1472 } else {
1473 sunaddr = NULL;
1474 err = -ENOTCONN;
1475 other = unix_peer_get(sk);
1476 if (!other)
1477 goto out;
1478 }
1479
f64f9e71
JP
1480 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1481 && (err = unix_autobind(sock)) != 0)
1da177e4
LT
1482 goto out;
1483
1484 err = -EMSGSIZE;
1485 if (len > sk->sk_sndbuf - 32)
1486 goto out;
1487
eb6a2481
ED
1488 if (len > SKB_MAX_ALLOC)
1489 data_len = min_t(size_t,
1490 len - SKB_MAX_ALLOC,
1491 MAX_SKB_FRAGS * PAGE_SIZE);
1492
1493 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
28d64271
ED
1494 msg->msg_flags & MSG_DONTWAIT, &err,
1495 PAGE_ALLOC_COSTLY_ORDER);
e27dfcea 1496 if (skb == NULL)
1da177e4
LT
1497 goto out;
1498
f78a5fda 1499 err = unix_scm_to_skb(siocb->scm, skb, true);
25888e30 1500 if (err < 0)
7361c36c 1501 goto out_free;
25888e30 1502 max_level = err + 1;
dc49c1f9 1503 unix_get_secdata(siocb->scm, skb);
877ce7c1 1504
eb6a2481
ED
1505 skb_put(skb, len - data_len);
1506 skb->data_len = data_len;
1507 skb->len = len;
1508 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1da177e4
LT
1509 if (err)
1510 goto out_free;
1511
1512 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1513
1514restart:
1515 if (!other) {
1516 err = -ECONNRESET;
1517 if (sunaddr == NULL)
1518 goto out_free;
1519
097e66c5 1520 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1da177e4 1521 hash, &err);
e27dfcea 1522 if (other == NULL)
1da177e4
LT
1523 goto out_free;
1524 }
1525
d6ae3bae
AC
1526 if (sk_filter(other, skb) < 0) {
1527 /* Toss the packet but do not return any error to the sender */
1528 err = len;
1529 goto out_free;
1530 }
1531
1c92b4e5 1532 unix_state_lock(other);
1da177e4
LT
1533 err = -EPERM;
1534 if (!unix_may_send(sk, other))
1535 goto out_unlock;
1536
1537 if (sock_flag(other, SOCK_DEAD)) {
1538 /*
1539 * Check with 1003.1g - what should
1540 * datagram error
1541 */
1c92b4e5 1542 unix_state_unlock(other);
1da177e4
LT
1543 sock_put(other);
1544
1545 err = 0;
1c92b4e5 1546 unix_state_lock(sk);
1da177e4 1547 if (unix_peer(sk) == other) {
e27dfcea 1548 unix_peer(sk) = NULL;
1c92b4e5 1549 unix_state_unlock(sk);
1da177e4
LT
1550
1551 unix_dgram_disconnected(sk, other);
1552 sock_put(other);
1553 err = -ECONNREFUSED;
1554 } else {
1c92b4e5 1555 unix_state_unlock(sk);
1da177e4
LT
1556 }
1557
1558 other = NULL;
1559 if (err)
1560 goto out_free;
1561 goto restart;
1562 }
1563
1564 err = -EPIPE;
1565 if (other->sk_shutdown & RCV_SHUTDOWN)
1566 goto out_unlock;
1567
1568 if (sk->sk_type != SOCK_SEQPACKET) {
1569 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1570 if (err)
1571 goto out_unlock;
1572 }
1573
3c73419c 1574 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1da177e4
LT
1575 if (!timeo) {
1576 err = -EAGAIN;
1577 goto out_unlock;
1578 }
1579
1580 timeo = unix_wait_for_peer(other, timeo);
1581
1582 err = sock_intr_errno(timeo);
1583 if (signal_pending(current))
1584 goto out_free;
1585
1586 goto restart;
1587 }
1588
3f66116e
AC
1589 if (sock_flag(other, SOCK_RCVTSTAMP))
1590 __net_timestamp(skb);
16e57262 1591 maybe_add_creds(skb, sock, other);
1da177e4 1592 skb_queue_tail(&other->sk_receive_queue, skb);
25888e30
ED
1593 if (max_level > unix_sk(other)->recursion_level)
1594 unix_sk(other)->recursion_level = max_level;
1c92b4e5 1595 unix_state_unlock(other);
1da177e4
LT
1596 other->sk_data_ready(other, len);
1597 sock_put(other);
f78a5fda 1598 scm_destroy(siocb->scm);
1da177e4
LT
1599 return len;
1600
1601out_unlock:
1c92b4e5 1602 unix_state_unlock(other);
1da177e4
LT
1603out_free:
1604 kfree_skb(skb);
1605out:
1606 if (other)
1607 sock_put(other);
f78a5fda 1608 scm_destroy(siocb->scm);
1da177e4
LT
1609 return err;
1610}
1611
e370a723
ED
1612/* We use paged skbs for stream sockets, and limit occupancy to 32768
1613 * bytes, and a minimun of a full page.
1614 */
1615#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
ac7bfa62 1616
1da177e4
LT
1617static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1618 struct msghdr *msg, size_t len)
1619{
1620 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1621 struct sock *sk = sock->sk;
1622 struct sock *other = NULL;
6eba6a37 1623 int err, size;
f78a5fda 1624 struct sk_buff *skb;
e27dfcea 1625 int sent = 0;
1da177e4 1626 struct scm_cookie tmp_scm;
8ba69ba6 1627 bool fds_sent = false;
25888e30 1628 int max_level;
e370a723 1629 int data_len;
1da177e4
LT
1630
1631 if (NULL == siocb->scm)
1632 siocb->scm = &tmp_scm;
5f23b734 1633 wait_for_unix_gc();
e0e3cea4 1634 err = scm_send(sock, msg, siocb->scm, false);
1da177e4
LT
1635 if (err < 0)
1636 return err;
1637
1638 err = -EOPNOTSUPP;
1639 if (msg->msg_flags&MSG_OOB)
1640 goto out_err;
1641
1642 if (msg->msg_namelen) {
1643 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1644 goto out_err;
1645 } else {
1da177e4 1646 err = -ENOTCONN;
830a1e5c 1647 other = unix_peer(sk);
1da177e4
LT
1648 if (!other)
1649 goto out_err;
1650 }
1651
1652 if (sk->sk_shutdown & SEND_SHUTDOWN)
1653 goto pipe_err;
1654
6eba6a37 1655 while (sent < len) {
e370a723 1656 size = len - sent;
1da177e4
LT
1657
1658 /* Keep two messages in the pipe so it schedules better */
e370a723 1659 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1da177e4 1660
e370a723
ED
1661 /* allow fallback to order-0 allocations */
1662 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
ac7bfa62 1663
e370a723 1664 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1da177e4 1665
e370a723 1666 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
28d64271
ED
1667 msg->msg_flags & MSG_DONTWAIT, &err,
1668 get_order(UNIX_SKB_FRAGS_SZ));
e370a723 1669 if (!skb)
1da177e4
LT
1670 goto out_err;
1671
f78a5fda
DM
1672 /* Only send the fds in the first buffer */
1673 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
25888e30 1674 if (err < 0) {
7361c36c 1675 kfree_skb(skb);
f78a5fda 1676 goto out_err;
6209344f 1677 }
25888e30 1678 max_level = err + 1;
7361c36c 1679 fds_sent = true;
1da177e4 1680
e370a723
ED
1681 skb_put(skb, size - data_len);
1682 skb->data_len = data_len;
1683 skb->len = size;
f3dfd208
ED
1684 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
1685 sent, size);
6eba6a37 1686 if (err) {
1da177e4 1687 kfree_skb(skb);
f78a5fda 1688 goto out_err;
1da177e4
LT
1689 }
1690
1c92b4e5 1691 unix_state_lock(other);
1da177e4
LT
1692
1693 if (sock_flag(other, SOCK_DEAD) ||
1694 (other->sk_shutdown & RCV_SHUTDOWN))
1695 goto pipe_err_free;
1696
16e57262 1697 maybe_add_creds(skb, sock, other);
1da177e4 1698 skb_queue_tail(&other->sk_receive_queue, skb);
25888e30
ED
1699 if (max_level > unix_sk(other)->recursion_level)
1700 unix_sk(other)->recursion_level = max_level;
1c92b4e5 1701 unix_state_unlock(other);
1da177e4 1702 other->sk_data_ready(other, size);
e27dfcea 1703 sent += size;
1da177e4 1704 }
1da177e4 1705
f78a5fda 1706 scm_destroy(siocb->scm);
1da177e4
LT
1707 siocb->scm = NULL;
1708
1709 return sent;
1710
1711pipe_err_free:
1c92b4e5 1712 unix_state_unlock(other);
1da177e4
LT
1713 kfree_skb(skb);
1714pipe_err:
6eba6a37
ED
1715 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1716 send_sig(SIGPIPE, current, 0);
1da177e4
LT
1717 err = -EPIPE;
1718out_err:
f78a5fda 1719 scm_destroy(siocb->scm);
1da177e4
LT
1720 siocb->scm = NULL;
1721 return sent ? : err;
1722}
1723
1724static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1725 struct msghdr *msg, size_t len)
1726{
1727 int err;
1728 struct sock *sk = sock->sk;
ac7bfa62 1729
1da177e4
LT
1730 err = sock_error(sk);
1731 if (err)
1732 return err;
1733
1734 if (sk->sk_state != TCP_ESTABLISHED)
1735 return -ENOTCONN;
1736
1737 if (msg->msg_namelen)
1738 msg->msg_namelen = 0;
1739
1740 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1741}
ac7bfa62 1742
a05d2ad1
EB
1743static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1744 struct msghdr *msg, size_t size,
1745 int flags)
1746{
1747 struct sock *sk = sock->sk;
1748
1749 if (sk->sk_state != TCP_ESTABLISHED)
1750 return -ENOTCONN;
1751
1752 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1753}
1754
1da177e4
LT
1755static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1756{
1757 struct unix_sock *u = unix_sk(sk);
1758
1da177e4
LT
1759 if (u->addr) {
1760 msg->msg_namelen = u->addr->len;
1761 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1762 }
1763}
1764
1765static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1766 struct msghdr *msg, size_t size,
1767 int flags)
1768{
1769 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1770 struct scm_cookie tmp_scm;
1771 struct sock *sk = sock->sk;
1772 struct unix_sock *u = unix_sk(sk);
1773 int noblock = flags & MSG_DONTWAIT;
1774 struct sk_buff *skb;
1775 int err;
f55bb7f9 1776 int peeked, skip;
1da177e4
LT
1777
1778 err = -EOPNOTSUPP;
1779 if (flags&MSG_OOB)
1780 goto out;
1781
b3ca9b02
RW
1782 err = mutex_lock_interruptible(&u->readlock);
1783 if (err) {
1784 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1785 goto out;
1786 }
1da177e4 1787
f55bb7f9
PE
1788 skip = sk_peek_offset(sk, flags);
1789
1790 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
0a112258
FZ
1791 if (!skb) {
1792 unix_state_lock(sk);
1793 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1794 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1795 (sk->sk_shutdown & RCV_SHUTDOWN))
1796 err = 0;
1797 unix_state_unlock(sk);
1da177e4 1798 goto out_unlock;
0a112258 1799 }
1da177e4 1800
67426b75
ED
1801 wake_up_interruptible_sync_poll(&u->peer_wait,
1802 POLLOUT | POLLWRNORM | POLLWRBAND);
1da177e4
LT
1803
1804 if (msg->msg_name)
1805 unix_copy_addr(msg, skb->sk);
1806
f55bb7f9
PE
1807 if (size > skb->len - skip)
1808 size = skb->len - skip;
1809 else if (size < skb->len - skip)
1da177e4
LT
1810 msg->msg_flags |= MSG_TRUNC;
1811
f55bb7f9 1812 err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
1da177e4
LT
1813 if (err)
1814 goto out_free;
1815
3f66116e
AC
1816 if (sock_flag(sk, SOCK_RCVTSTAMP))
1817 __sock_recv_timestamp(msg, sk, skb);
1818
1da177e4
LT
1819 if (!siocb->scm) {
1820 siocb->scm = &tmp_scm;
1821 memset(&tmp_scm, 0, sizeof(tmp_scm));
1822 }
6b0ee8c0 1823 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
877ce7c1 1824 unix_set_secdata(siocb->scm, skb);
1da177e4 1825
6eba6a37 1826 if (!(flags & MSG_PEEK)) {
1da177e4
LT
1827 if (UNIXCB(skb).fp)
1828 unix_detach_fds(siocb->scm, skb);
f55bb7f9
PE
1829
1830 sk_peek_offset_bwd(sk, skb->len);
6eba6a37 1831 } else {
1da177e4
LT
1832 /* It is questionable: on PEEK we could:
1833 - do not return fds - good, but too simple 8)
1834 - return fds, and do not return them on read (old strategy,
1835 apparently wrong)
1836 - clone fds (I chose it for now, it is the most universal
1837 solution)
ac7bfa62
YH
1838
1839 POSIX 1003.1g does not actually define this clearly
1840 at all. POSIX 1003.1g doesn't define a lot of things
1841 clearly however!
1842
1da177e4 1843 */
f55bb7f9
PE
1844
1845 sk_peek_offset_fwd(sk, size);
1846
1da177e4
LT
1847 if (UNIXCB(skb).fp)
1848 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1849 }
9f6f9af7 1850 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1da177e4
LT
1851
1852 scm_recv(sock, msg, siocb->scm, flags);
1853
1854out_free:
6eba6a37 1855 skb_free_datagram(sk, skb);
1da177e4 1856out_unlock:
57b47a53 1857 mutex_unlock(&u->readlock);
1da177e4
LT
1858out:
1859 return err;
1860}
1861
/*
 *	Sleep until more data has arrived, but check for races.
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last)
{
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		if (skb_peek_tail(&sk->sk_receive_queue) != last ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		unix_state_unlock(sk);
		timeo = freezable_schedule_timeout(timeo);
		unix_state_lock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}

static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size,
			       int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
	struct scm_cookie tmp_scm;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	struct sockaddr_un *sunaddr = msg->msg_name;
	int copied = 0;
	int check_creds = 0;
	int target;
	int err = 0;
	long timeo;
	int skip;

	err = -EINVAL;
	if (sk->sk_state != TCP_ESTABLISHED)
		goto out;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);

	/* Lock the socket to prevent queue disordering
	 * while we sleep in skb_copy_datagram_iovec().
	 */

	if (!siocb->scm) {
		siocb->scm = &tmp_scm;
		memset(&tmp_scm, 0, sizeof(tmp_scm));
	}

	err = mutex_lock_interruptible(&u->readlock);
	if (err) {
		err = sock_intr_errno(timeo);
		goto out;
	}

	do {
		int chunk;
		struct sk_buff *skb, *last;

		unix_state_lock(sk);
		last = skb = skb_peek(&sk->sk_receive_queue);
again:
		if (skb == NULL) {
			unix_sk(sk)->recursion_level = 0;
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);

			timeo = unix_stream_data_wait(sk, timeo, last);

			if (signal_pending(current)
			    || mutex_lock_interruptible(&u->readlock)) {
				err = sock_intr_errno(timeo);
				goto out;
			}

			continue;
 unlock:
			unix_state_unlock(sk);
			break;
		}

		skip = sk_peek_offset(sk, flags);
		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if ((UNIXCB(skb).pid != siocb->scm->pid) ||
			    !uid_eq(UNIXCB(skb).uid, siocb->scm->creds.uid) ||
			    !gid_eq(UNIXCB(skb).gid, siocb->scm->creds.gid))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			check_creds = 1;
		}

		/* Copy address just once */
		if (sunaddr) {
			unix_copy_addr(msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
					    msg->msg_iov, chunk)) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp)
				unix_detach_fds(siocb->scm, skb);

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (siocb->scm->fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);

			sk_peek_offset_fwd(sk, chunk);

			break;
		}
	} while (size);

	mutex_unlock(&u->readlock);
	scm_recv(sock, msg, siocb->scm, flags);
out:
	return copied ? : err;
}

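/*
 * Illustrative userspace sketch (not part of this file): receiving the
 * credentials that the SOCK_PASSCRED branch of unix_stream_recvmsg()
 * attaches, and which the check_creds test refuses to merge across
 * different writers within one recvmsg() call.  "fd" is assumed to be a
 * connected AF_UNIX stream socket; _GNU_SOURCE is assumed for struct
 * ucred, and error handling is omitted.
 *
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	struct ucred cred;
 *	char buf[256];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	union {
 *		struct cmsghdr hdr;
 *		char space[CMSG_SPACE(sizeof(struct ucred))];
 *	} ctl;
 *	struct msghdr msg = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = ctl.space,
 *		.msg_controllen = sizeof(ctl.space),
 *	};
 *	struct cmsghdr *cmsg;
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	recvmsg(fd, &msg, 0);
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_SOCKET &&
 *		    cmsg->cmsg_type == SCM_CREDENTIALS)
 *			memcpy(&cred, CMSG_DATA(cmsg), sizeof(cred));
 *	// cred.pid, cred.uid and cred.gid now describe the sender
 */
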
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {

		int peer_mode = 0;

		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}

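/*
 * Illustrative userspace sketch (not part of this file): the peer
 * notification done in unix_shutdown() above is what makes a half-close
 * visible on the other end of a connected pair.  Descriptor names are
 * arbitrary and error handling is omitted.
 *
 *	int fds[2];
 *	char c;
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *	shutdown(fds[0], SHUT_WR);	// fds[0] may no longer send
 *	read(fds[1], &c, 1);		// returns 0: the peer sees EOF
 *	write(fds[0], "x", 1);		// fails with EPIPE (and raises SIGPIPE)
 */
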
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);

static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}

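/*
 * Illustrative userspace sketch (not part of this file): the two ioctls
 * handled above expose unix_inq_len() and unix_outq_len().  SIOCINQ
 * reports unread bytes queued on the receive side; SIOCOUTQ reports bytes
 * written but not yet consumed by the peer.  "fd" is assumed to be an
 * AF_UNIX socket.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int unread, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */
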
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

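/*
 * Illustrative userspace sketch (not part of this file): because
 * unix_dgram_poll() clears writability while the peer's receive queue is
 * full (unix_recvq_full()), a datagram sender can block in poll() instead
 * of spinning on -EAGAIN.  "fd", "buf" and "len" are assumed to exist;
 * the 1000 ms timeout is arbitrary.
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLOUT))
 *		send(fd, buf, len, MSG_DONTWAIT);
 */
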
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	struct sock *sk;
	unsigned long count = 0;

	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
		if (sock_net(sk) != seq_file_net(seq))
			continue;
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_next_socket(struct seq_file *seq,
				     struct sock *sk,
				     loff_t *pos)
{
	unsigned long bucket;

	while (sk > (struct sock *)SEQ_START_TOKEN) {
		sk = sk_next(sk);
		if (!sk)
			goto next_bucket;
		if (sock_net(sk) == seq_file_net(seq))
			return sk;
	}

	do {
		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

next_bucket:
		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < ARRAY_SIZE(unix_socket_table));

	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);

	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
		return NULL;

	return unix_next_socket(seq, NULL, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return unix_next_socket(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

static int unix_seq_show(struct seq_file *seq, void *v)
{

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);
		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;
			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

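/*
 * The seq_printf()/seq_putc() calls above generate /proc/net/unix.  A
 * hypothetical line (all values invented for illustration) for a
 * listening stream socket bound to a filesystem path might look like:
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	0000000000000000: 00000002 00000000 00010000 0001 01 12345 /run/example.sock
 *
 * The columns map directly onto the format string above: kernel address
 * (%pK), refcount, protocol (always 0), flags (__SO_ACCEPTCON for
 * listeners), socket type, state, inode and, when bound, the path (with a
 * leading '@' for abstract names).
 */
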
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};


static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

static int __init af_unix_init(void)
{
	int rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);