net: Make sure BHs are disabled in sock_prot_inuse_add()
net/unix/af_unix.c
1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and gives the blksize as high water mark
59 * and a fake inode identifier (nor does it have the BSD first-socket-fstat-twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has a connect() that forgets to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
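
/*
 * Editor's illustration (not part of this file): the two user-visible
 * address flavours described above, as seen from userspace. A minimal
 * sketch assuming a POSIX environment; the path and names are
 * illustrative only, and error handling is omitted.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	int fs_fd  = socket(AF_UNIX, SOCK_STREAM, 0);
 *	int abs_fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *	struct sockaddr_un a;
 *
 *	memset(&a, 0, sizeof(a));
 *	a.sun_family = AF_UNIX;
 *
 *	// Filesystem name: a NUL-terminated path; bind() creates an inode.
 *	strcpy(a.sun_path, "/tmp/demo.sock");
 *	bind(fs_fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	// Abstract name: sun_path[0] == 0, the following bytes are the name,
 *	// and the address length says how many of them count.
 *	memset(a.sun_path, 0, sizeof(a.sun_path));
 *	memcpy(a.sun_path + 1, "demo", 4);
 *	bind(abs_fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 */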
82
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
93 #include <linux/un.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
98 #include <linux/in.h>
99 #include <linux/fs.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
110 #include <net/scm.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
117
118 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119 static DEFINE_SPINLOCK(unix_table_lock);
120 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
121
122 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
126 #ifdef CONFIG_SECURITY_NETWORK
127 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
128 {
129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
130 }
131
132 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133 {
134 scm->secid = *UNIXSID(skb);
135 }
136 #else
137 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
138 { }
139
140 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141 { }
142 #endif /* CONFIG_SECURITY_NETWORK */
143
144 /*
145 * SMP locking strategy:
146 * the hash table is protected by the unix_table_lock spinlock;
147 * each socket's state is protected by its own spinlock.
148 */
149
150 static inline unsigned unix_hash_fold(__wsum n)
151 {
152 unsigned hash = (__force unsigned)n;
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156 }
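
/*
 * Editor's worked example: assuming UNIX_HASH_SIZE == 256, as defined in
 * this era's <net/af_unix.h>, folding n = 0x12345678 proceeds:
 *
 *	hash  = 0x12345678;
 *	hash ^= hash >> 16;	// 0x1234444c
 *	hash ^= hash >> 8;	// 0x12267008
 *	return hash & 0xff;	// 0x08
 *
 * so every bit of the 32-bit checksum influences the final bucket index.
 */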
157
158 #define unix_peer(sk) (unix_sk(sk)->peer)
159
160 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161 {
162 return unix_peer(osk) == sk;
163 }
164
165 static inline int unix_may_send(struct sock *sk, struct sock *osk)
166 {
167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
168 }
169
170 static inline int unix_recvq_full(struct sock const *sk)
171 {
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173 }
174
175 static struct sock *unix_peer_get(struct sock *s)
176 {
177 struct sock *peer;
178
179 unix_state_lock(s);
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
183 unix_state_unlock(s);
184 return peer;
185 }
186
187 static inline void unix_release_addr(struct unix_address *addr)
188 {
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191 }
192
193 /*
194 * Check unix socket name:
195 * - it should not be zero length.
196 * - if it does not start with a zero byte, it should be NUL terminated (an FS object)
197 * - if it starts with a zero byte, it is an abstract name.
198 */
199
200 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
201 {
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
214 ((char *)sunaddr)[len] = 0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
220 return len;
221 }
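
/*
 * Editor's sketch of the three address shapes unix_mkname() accepts
 * (sizeof(short) is the sun_family field; the values are illustrative):
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	// 1. Filesystem name, sun_path[0] != 0: the returned length is
 *	//    recomputed from strlen(), e.g. for "/tmp/x" it is 2 + 6 + 1.
 *	// 2. Abstract name, sun_path[0] == 0: the caller-supplied len is
 *	//    authoritative, and *hashp is derived from a checksum of it.
 *	// 3. len <= sizeof(short) or len > sizeof(a): -EINVAL.
 */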
222
223 static void __unix_remove_socket(struct sock *sk)
224 {
225 sk_del_node_init(sk);
226 }
227
228 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229 {
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
232 }
233
234 static inline void unix_remove_socket(struct sock *sk)
235 {
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
239 }
240
241 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242 {
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
246 }
247
248 static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
251 {
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
258 if (!net_eq(sock_net(s), net))
259 continue;
260
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266 found:
267 return s;
268 }
269
270 static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
272 int len, int type,
273 unsigned hash)
274 {
275 struct sock *s;
276
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
279 if (s)
280 sock_hold(s);
281 spin_unlock(&unix_table_lock);
282 return s;
283 }
284
285 static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
286 {
287 struct sock *s;
288 struct hlist_node *node;
289
290 spin_lock(&unix_table_lock);
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
295 if (!net_eq(sock_net(s), net))
296 continue;
297
298 if (dentry && dentry->d_inode == i) {
299 sock_hold(s);
300 goto found;
301 }
302 }
303 s = NULL;
304 found:
305 spin_unlock(&unix_table_lock);
306 return s;
307 }
308
309 static inline int unix_writable(struct sock *sk)
310 {
311 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
312 }
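
/*
 * Editor's note: the shift above means a socket counts as writable while
 * at most a quarter of sk_sndbuf is in flight; e.g. with sk_sndbuf ==
 * 65536, poll() reports POLLOUT while sk_wmem_alloc <= 16384.
 */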
313
314 static void unix_write_space(struct sock *sk)
315 {
316 read_lock(&sk->sk_callback_lock);
317 if (unix_writable(sk)) {
318 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
319 wake_up_interruptible_sync(sk->sk_sleep);
320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
321 }
322 read_unlock(&sk->sk_callback_lock);
323 }
324
325 /* When a dgram socket disconnects (or changes its peer), we clear its receive
326 * queue of packets that arrived from the previous peer. First, this allows us to do
327 * flow control based only on wmem_alloc; second, an sk connected to a peer
328 * may receive messages only from that peer. */
329 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
330 {
331 if (!skb_queue_empty(&sk->sk_receive_queue)) {
332 skb_queue_purge(&sk->sk_receive_queue);
333 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
334
335 /* If one link of a bidirectional dgram pipe is disconnected,
336 * we signal an error. Messages are lost. Do not do this
337 * when the peer was not connected to us.
338 */
339 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
340 other->sk_err = ECONNRESET;
341 other->sk_error_report(other);
342 }
343 }
344 }
345
346 static void unix_sock_destructor(struct sock *sk)
347 {
348 struct unix_sock *u = unix_sk(sk);
349
350 skb_queue_purge(&sk->sk_receive_queue);
351
352 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
353 WARN_ON(!sk_unhashed(sk));
354 WARN_ON(sk->sk_socket);
355 if (!sock_flag(sk, SOCK_DEAD)) {
356 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
357 return;
358 }
359
360 if (u->addr)
361 unix_release_addr(u->addr);
362
363 atomic_dec(&unix_nr_socks);
364 local_bh_disable();
365 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
366 local_bh_enable();
367 #ifdef UNIX_REFCNT_DEBUG
368 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
369 atomic_read(&unix_nr_socks));
370 #endif
371 }
372
373 static int unix_release_sock(struct sock *sk, int embrion)
374 {
375 struct unix_sock *u = unix_sk(sk);
376 struct dentry *dentry;
377 struct vfsmount *mnt;
378 struct sock *skpair;
379 struct sk_buff *skb;
380 int state;
381
382 unix_remove_socket(sk);
383
384 /* Clear state */
385 unix_state_lock(sk);
386 sock_orphan(sk);
387 sk->sk_shutdown = SHUTDOWN_MASK;
388 dentry = u->dentry;
389 u->dentry = NULL;
390 mnt = u->mnt;
391 u->mnt = NULL;
392 state = sk->sk_state;
393 sk->sk_state = TCP_CLOSE;
394 unix_state_unlock(sk);
395
396 wake_up_interruptible_all(&u->peer_wait);
397
398 skpair = unix_peer(sk);
399
400 if (skpair != NULL) {
401 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
402 unix_state_lock(skpair);
403 /* No more writes */
404 skpair->sk_shutdown = SHUTDOWN_MASK;
405 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
406 skpair->sk_err = ECONNRESET;
407 unix_state_unlock(skpair);
408 skpair->sk_state_change(skpair);
409 read_lock(&skpair->sk_callback_lock);
410 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
411 read_unlock(&skpair->sk_callback_lock);
412 }
413 sock_put(skpair); /* It may now die */
414 unix_peer(sk) = NULL;
415 }
416
417 /* Try to flush out this socket. Throw out buffers at least */
418
419 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
420 if (state == TCP_LISTEN)
421 unix_release_sock(skb->sk, 1);
422 /* passed fds are erased in the kfree_skb hook */
423 kfree_skb(skb);
424 }
425
426 if (dentry) {
427 dput(dentry);
428 mntput(mnt);
429 }
430
431 sock_put(sk);
432
433 /* ---- Socket is dead now and most probably destroyed ---- */
434
435 /*
436 * Fixme: BSD difference: In BSD all sockets connected to us get
437 * ECONNRESET and we die on the spot. In Linux we behave
438 * like files and pipes do and wait for the last
439 * dereference.
440 *
441 * Can't we simply set sock->err?
442 *
443 * What does the above comment talk about? --ANK(980817)
444 */
445
446 if (unix_tot_inflight)
447 unix_gc(); /* Garbage collect fds */
448
449 return 0;
450 }
451
452 static int unix_listen(struct socket *sock, int backlog)
453 {
454 int err;
455 struct sock *sk = sock->sk;
456 struct unix_sock *u = unix_sk(sk);
457
458 err = -EOPNOTSUPP;
459 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
460 goto out; /* Only stream/seqpacket sockets accept */
461 err = -EINVAL;
462 if (!u->addr)
463 goto out; /* No listens on an unbound socket */
464 unix_state_lock(sk);
465 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
466 goto out_unlock;
467 if (backlog > sk->sk_max_ack_backlog)
468 wake_up_interruptible_all(&u->peer_wait);
469 sk->sk_max_ack_backlog = backlog;
470 sk->sk_state = TCP_LISTEN;
471 /* set credentials so connect can copy them */
472 sk->sk_peercred.pid = task_tgid_vnr(current);
473 sk->sk_peercred.uid = current->euid;
474 sk->sk_peercred.gid = current->egid;
475 err = 0;
476
477 out_unlock:
478 unix_state_unlock(sk);
479 out:
480 return err;
481 }
482
483 static int unix_release(struct socket *);
484 static int unix_bind(struct socket *, struct sockaddr *, int);
485 static int unix_stream_connect(struct socket *, struct sockaddr *,
486 int addr_len, int flags);
487 static int unix_socketpair(struct socket *, struct socket *);
488 static int unix_accept(struct socket *, struct socket *, int);
489 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
490 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
491 static unsigned int unix_dgram_poll(struct file *, struct socket *,
492 poll_table *);
493 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
494 static int unix_shutdown(struct socket *, int);
495 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
496 struct msghdr *, size_t);
497 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
498 struct msghdr *, size_t, int);
499 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
500 struct msghdr *, size_t);
501 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
502 struct msghdr *, size_t, int);
503 static int unix_dgram_connect(struct socket *, struct sockaddr *,
504 int, int);
505 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
506 struct msghdr *, size_t);
507
508 static const struct proto_ops unix_stream_ops = {
509 .family = PF_UNIX,
510 .owner = THIS_MODULE,
511 .release = unix_release,
512 .bind = unix_bind,
513 .connect = unix_stream_connect,
514 .socketpair = unix_socketpair,
515 .accept = unix_accept,
516 .getname = unix_getname,
517 .poll = unix_poll,
518 .ioctl = unix_ioctl,
519 .listen = unix_listen,
520 .shutdown = unix_shutdown,
521 .setsockopt = sock_no_setsockopt,
522 .getsockopt = sock_no_getsockopt,
523 .sendmsg = unix_stream_sendmsg,
524 .recvmsg = unix_stream_recvmsg,
525 .mmap = sock_no_mmap,
526 .sendpage = sock_no_sendpage,
527 };
528
529 static const struct proto_ops unix_dgram_ops = {
530 .family = PF_UNIX,
531 .owner = THIS_MODULE,
532 .release = unix_release,
533 .bind = unix_bind,
534 .connect = unix_dgram_connect,
535 .socketpair = unix_socketpair,
536 .accept = sock_no_accept,
537 .getname = unix_getname,
538 .poll = unix_dgram_poll,
539 .ioctl = unix_ioctl,
540 .listen = sock_no_listen,
541 .shutdown = unix_shutdown,
542 .setsockopt = sock_no_setsockopt,
543 .getsockopt = sock_no_getsockopt,
544 .sendmsg = unix_dgram_sendmsg,
545 .recvmsg = unix_dgram_recvmsg,
546 .mmap = sock_no_mmap,
547 .sendpage = sock_no_sendpage,
548 };
549
550 static const struct proto_ops unix_seqpacket_ops = {
551 .family = PF_UNIX,
552 .owner = THIS_MODULE,
553 .release = unix_release,
554 .bind = unix_bind,
555 .connect = unix_stream_connect,
556 .socketpair = unix_socketpair,
557 .accept = unix_accept,
558 .getname = unix_getname,
559 .poll = unix_dgram_poll,
560 .ioctl = unix_ioctl,
561 .listen = unix_listen,
562 .shutdown = unix_shutdown,
563 .setsockopt = sock_no_setsockopt,
564 .getsockopt = sock_no_getsockopt,
565 .sendmsg = unix_seqpacket_sendmsg,
566 .recvmsg = unix_dgram_recvmsg,
567 .mmap = sock_no_mmap,
568 .sendpage = sock_no_sendpage,
569 };
570
571 static struct proto unix_proto = {
572 .name = "UNIX",
573 .owner = THIS_MODULE,
574 .sockets_allocated = &unix_nr_socks,
575 .obj_size = sizeof(struct unix_sock),
576 };
577
578 /*
579 * AF_UNIX sockets do not interact with hardware, hence they
580 * don't trigger interrupts - so it's safe for them to have
581 * bh-unsafe locking for their sk_receive_queue.lock. Split off
582 * this special lock-class by reinitializing the spinlock key:
583 */
584 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
585
586 static struct sock *unix_create1(struct net *net, struct socket *sock)
587 {
588 struct sock *sk = NULL;
589 struct unix_sock *u;
590
591 atomic_inc(&unix_nr_socks);
592 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
593 goto out;
594
595 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
596 if (!sk)
597 goto out;
598
599 sock_init_data(sock, sk);
600 lockdep_set_class(&sk->sk_receive_queue.lock,
601 &af_unix_sk_receive_queue_lock_key);
602
603 sk->sk_write_space = unix_write_space;
604 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
605 sk->sk_destruct = unix_sock_destructor;
606 u = unix_sk(sk);
607 u->dentry = NULL;
608 u->mnt = NULL;
609 spin_lock_init(&u->lock);
610 atomic_long_set(&u->inflight, 0);
611 INIT_LIST_HEAD(&u->link);
612 mutex_init(&u->readlock); /* single task reading lock */
613 init_waitqueue_head(&u->peer_wait);
614 unix_insert_socket(unix_sockets_unbound, sk);
615 out:
616 if (sk == NULL)
617 atomic_dec(&unix_nr_socks);
618 else {
619 local_bh_disable();
620 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
621 local_bh_enable();
622 }
623 return sk;
624 }
625
626 static int unix_create(struct net *net, struct socket *sock, int protocol)
627 {
628 if (protocol && protocol != PF_UNIX)
629 return -EPROTONOSUPPORT;
630
631 sock->state = SS_UNCONNECTED;
632
633 switch (sock->type) {
634 case SOCK_STREAM:
635 sock->ops = &unix_stream_ops;
636 break;
637 /*
638 * Believe it or not, BSD has AF_UNIX, SOCK_RAW though
639 * nothing uses it.
640 */
641 case SOCK_RAW:
642 sock->type = SOCK_DGRAM;
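		/* fall through */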
643 case SOCK_DGRAM:
644 sock->ops = &unix_dgram_ops;
645 break;
646 case SOCK_SEQPACKET:
647 sock->ops = &unix_seqpacket_ops;
648 break;
649 default:
650 return -ESOCKTNOSUPPORT;
651 }
652
653 return unix_create1(net, sock) ? 0 : -ENOMEM;
654 }
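
/*
 * Editor's illustration of the SOCK_RAW remapping above, as a hedged
 * userspace sketch:
 *
 *	int fd = socket(AF_UNIX, SOCK_RAW, 0);		// succeeds...
 *	int t;
 *	socklen_t l = sizeof(t);
 *	getsockopt(fd, SOL_SOCKET, SO_TYPE, &t, &l);
 *	// ...but t should now read back as SOCK_DGRAM, not SOCK_RAW.
 */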
655
656 static int unix_release(struct socket *sock)
657 {
658 struct sock *sk = sock->sk;
659
660 if (!sk)
661 return 0;
662
663 sock->sk = NULL;
664
665 return unix_release_sock(sk, 0);
666 }
667
668 static int unix_autobind(struct socket *sock)
669 {
670 struct sock *sk = sock->sk;
671 struct net *net = sock_net(sk);
672 struct unix_sock *u = unix_sk(sk);
673 static u32 ordernum = 1;
674 struct unix_address *addr;
675 int err;
676
677 mutex_lock(&u->readlock);
678
679 err = 0;
680 if (u->addr)
681 goto out;
682
683 err = -ENOMEM;
684 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
685 if (!addr)
686 goto out;
687
688 addr->name->sun_family = AF_UNIX;
689 atomic_set(&addr->refcnt, 1);
690
691 retry:
692 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
693 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
694
695 spin_lock(&unix_table_lock);
696 ordernum = (ordernum+1)&0xFFFFF;
697
698 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
699 addr->hash)) {
700 spin_unlock(&unix_table_lock);
701 /* Sanity yield. It is an unusual case, but still... */
702 if (!(ordernum&0xFF))
703 yield();
704 goto retry;
705 }
706 addr->hash ^= sk->sk_type;
707
708 __unix_remove_socket(sk);
709 u->addr = addr;
710 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
711 spin_unlock(&unix_table_lock);
712 err = 0;
713
714 out: mutex_unlock(&u->readlock);
715 return err;
716 }
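
/*
 * Editor's sketch: autobind triggers when bind() is handed only the
 * address family (addr_len == sizeof(short)). The kernel then assigns an
 * abstract name of five hex digits, visible through getsockname().
 * fd is an already-created AF_UNIX socket; the value shown is made up.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	socklen_t l = sizeof(a);
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(short));
 *	getsockname(fd, (struct sockaddr *)&a, &l);
 *	// a.sun_path[0] == 0, a.sun_path[1..5] is e.g. "00f3a", and
 *	// l == sizeof(short) + 1 + 5, matching addr->len computed above.
 */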
717
718 static struct sock *unix_find_other(struct net *net,
719 struct sockaddr_un *sunname, int len,
720 int type, unsigned hash, int *error)
721 {
722 struct sock *u;
723 struct path path;
724 int err = 0;
725
726 if (sunname->sun_path[0]) {
727 struct inode *inode;
728 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
729 if (err)
730 goto fail;
731 inode = path.dentry->d_inode;
732 err = inode_permission(inode, MAY_WRITE);
733 if (err)
734 goto put_fail;
735
736 err = -ECONNREFUSED;
737 if (!S_ISSOCK(inode->i_mode))
738 goto put_fail;
739 u = unix_find_socket_byinode(net, inode);
740 if (!u)
741 goto put_fail;
742
743 if (u->sk_type == type)
744 touch_atime(path.mnt, path.dentry);
745
746 path_put(&path);
747
748 err = -EPROTOTYPE;
749 if (u->sk_type != type) {
750 sock_put(u);
751 goto fail;
752 }
753 } else {
754 err = -ECONNREFUSED;
755 u = unix_find_socket_byname(net, sunname, len, type, hash);
756 if (u) {
757 struct dentry *dentry;
758 dentry = unix_sk(u)->dentry;
759 if (dentry)
760 touch_atime(unix_sk(u)->mnt, dentry);
761 } else
762 goto fail;
763 }
764 return u;
765
766 put_fail:
767 path_put(&path);
768 fail:
769 *error = err;
770 return NULL;
771 }
772
773
774 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
775 {
776 struct sock *sk = sock->sk;
777 struct net *net = sock_net(sk);
778 struct unix_sock *u = unix_sk(sk);
779 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
780 struct dentry *dentry = NULL;
781 struct nameidata nd;
782 int err;
783 unsigned hash;
784 struct unix_address *addr;
785 struct hlist_head *list;
786
787 err = -EINVAL;
788 if (sunaddr->sun_family != AF_UNIX)
789 goto out;
790
791 if (addr_len == sizeof(short)) {
792 err = unix_autobind(sock);
793 goto out;
794 }
795
796 err = unix_mkname(sunaddr, addr_len, &hash);
797 if (err < 0)
798 goto out;
799 addr_len = err;
800
801 mutex_lock(&u->readlock);
802
803 err = -EINVAL;
804 if (u->addr)
805 goto out_up;
806
807 err = -ENOMEM;
808 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
809 if (!addr)
810 goto out_up;
811
812 memcpy(addr->name, sunaddr, addr_len);
813 addr->len = addr_len;
814 addr->hash = hash ^ sk->sk_type;
815 atomic_set(&addr->refcnt, 1);
816
817 if (sunaddr->sun_path[0]) {
818 unsigned int mode;
819 err = 0;
820 /*
821 * Get the parent directory, calculate the hash for last
822 * component.
823 */
824 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
825 if (err)
826 goto out_mknod_parent;
827
828 dentry = lookup_create(&nd, 0);
829 err = PTR_ERR(dentry);
830 if (IS_ERR(dentry))
831 goto out_mknod_unlock;
832
833 /*
834 * All right, let's create it.
835 */
836 mode = S_IFSOCK |
837 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
838 err = mnt_want_write(nd.path.mnt);
839 if (err)
840 goto out_mknod_dput;
841 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
842 mnt_drop_write(nd.path.mnt);
843 if (err)
844 goto out_mknod_dput;
845 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
846 dput(nd.path.dentry);
847 nd.path.dentry = dentry;
848
849 addr->hash = UNIX_HASH_SIZE;
850 }
851
852 spin_lock(&unix_table_lock);
853
854 if (!sunaddr->sun_path[0]) {
855 err = -EADDRINUSE;
856 if (__unix_find_socket_byname(net, sunaddr, addr_len,
857 sk->sk_type, hash)) {
858 unix_release_addr(addr);
859 goto out_unlock;
860 }
861
862 list = &unix_socket_table[addr->hash];
863 } else {
864 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
865 u->dentry = nd.path.dentry;
866 u->mnt = nd.path.mnt;
867 }
868
869 err = 0;
870 __unix_remove_socket(sk);
871 u->addr = addr;
872 __unix_insert_socket(list, sk);
873
874 out_unlock:
875 spin_unlock(&unix_table_lock);
876 out_up:
877 mutex_unlock(&u->readlock);
878 out:
879 return err;
880
881 out_mknod_dput:
882 dput(dentry);
883 out_mknod_unlock:
884 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
885 path_put(&nd.path);
886 out_mknod_parent:
887 if (err == -EEXIST)
888 err = -EADDRINUSE;
889 unix_release_addr(addr);
890 goto out_up;
891 }
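
/*
 * Editor's usage note: a filesystem bind() above mknod()s a socket inode
 * that outlives the socket; rebinding the same path later fails with
 * EADDRINUSE (mapped from -EEXIST). Hence the conventional userspace
 * pattern (path illustrative):
 *
 *	unlink("/tmp/demo.sock");	// harmless if absent
 *	strcpy(a.sun_path, "/tmp/demo.sock");
 *	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("bind");
 */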
892
893 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
894 {
895 if (unlikely(sk1 == sk2) || !sk2) {
896 unix_state_lock(sk1);
897 return;
898 }
899 if (sk1 < sk2) {
900 unix_state_lock(sk1);
901 unix_state_lock_nested(sk2);
902 } else {
903 unix_state_lock(sk2);
904 unix_state_lock_nested(sk1);
905 }
906 }
907
908 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
909 {
910 if (unlikely(sk1 == sk2) || !sk2) {
911 unix_state_unlock(sk1);
912 return;
913 }
914 unix_state_unlock(sk1);
915 unix_state_unlock(sk2);
916 }
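
/*
 * Editor's note: the pair above is the classic "always lock in a global
 * order" discipline, here ascending pointer value, so tasks connecting
 * A->B and B->A cannot deadlock. The same idea in a generic sketch:
 *
 *	void lock_pair(spinlock_t *a, spinlock_t *b)
 *	{
 *		if (a == b || !b) {
 *			spin_lock(a);
 *			return;
 *		}
 *		if (a < b) {
 *			spin_lock(a);
 *			spin_lock_nested(b, SINGLE_DEPTH_NESTING);
 *		} else {
 *			spin_lock(b);
 *			spin_lock_nested(a, SINGLE_DEPTH_NESTING);
 *		}
 *	}
 */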
917
918 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
919 int alen, int flags)
920 {
921 struct sock *sk = sock->sk;
922 struct net *net = sock_net(sk);
923 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
924 struct sock *other;
925 unsigned hash;
926 int err;
927
928 if (addr->sa_family != AF_UNSPEC) {
929 err = unix_mkname(sunaddr, alen, &hash);
930 if (err < 0)
931 goto out;
932 alen = err;
933
934 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
935 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
936 goto out;
937
938 restart:
939 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
940 if (!other)
941 goto out;
942
943 unix_state_double_lock(sk, other);
944
945 /* Apparently VFS overslept socket death. Retry. */
946 if (sock_flag(other, SOCK_DEAD)) {
947 unix_state_double_unlock(sk, other);
948 sock_put(other);
949 goto restart;
950 }
951
952 err = -EPERM;
953 if (!unix_may_send(sk, other))
954 goto out_unlock;
955
956 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
957 if (err)
958 goto out_unlock;
959
960 } else {
961 /*
962 * 1003.1g breaking connected state with AF_UNSPEC
963 */
964 other = NULL;
965 unix_state_double_lock(sk, other);
966 }
967
968 /*
969 * If it was connected, reconnect.
970 */
971 if (unix_peer(sk)) {
972 struct sock *old_peer = unix_peer(sk);
973 unix_peer(sk) = other;
974 unix_state_double_unlock(sk, other);
975
976 if (other != old_peer)
977 unix_dgram_disconnected(sk, old_peer);
978 sock_put(old_peer);
979 } else {
980 unix_peer(sk) = other;
981 unix_state_double_unlock(sk, other);
982 }
983 return 0;
984
985 out_unlock:
986 unix_state_double_unlock(sk, other);
987 sock_put(other);
988 out:
989 return err;
990 }
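
/*
 * Editor's sketch of the userspace view: a datagram socket may connect(),
 * reconnect to a new peer, and, per 1003.1g, disconnect again by passing
 * an AF_UNSPEC address (peer and len are illustrative):
 *
 *	connect(fd, (struct sockaddr *)&peer, len);	// set default dest
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));			// back to unconnected
 */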
991
992 static long unix_wait_for_peer(struct sock *other, long timeo)
993 {
994 struct unix_sock *u = unix_sk(other);
995 int sched;
996 DEFINE_WAIT(wait);
997
998 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
999
1000 sched = !sock_flag(other, SOCK_DEAD) &&
1001 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1002 unix_recvq_full(other);
1003
1004 unix_state_unlock(other);
1005
1006 if (sched)
1007 timeo = schedule_timeout(timeo);
1008
1009 finish_wait(&u->peer_wait, &wait);
1010 return timeo;
1011 }
1012
1013 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1014 int addr_len, int flags)
1015 {
1016 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1017 struct sock *sk = sock->sk;
1018 struct net *net = sock_net(sk);
1019 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1020 struct sock *newsk = NULL;
1021 struct sock *other = NULL;
1022 struct sk_buff *skb = NULL;
1023 unsigned hash;
1024 int st;
1025 int err;
1026 long timeo;
1027
1028 err = unix_mkname(sunaddr, addr_len, &hash);
1029 if (err < 0)
1030 goto out;
1031 addr_len = err;
1032
1033 if (test_bit(SOCK_PASSCRED, &sock->flags)
1034 && !u->addr && (err = unix_autobind(sock)) != 0)
1035 goto out;
1036
1037 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1038
1039 /* First of all allocate resources.
1040 If we do it after the state is locked,
1041 we will have to recheck everything again in any case.
1042 */
1043
1044 err = -ENOMEM;
1045
1046 /* create new sock for complete connection */
1047 newsk = unix_create1(sock_net(sk), NULL);
1048 if (newsk == NULL)
1049 goto out;
1050
1051 /* Allocate skb for sending to listening sock */
1052 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1053 if (skb == NULL)
1054 goto out;
1055
1056 restart:
1057 /* Find listening sock. */
1058 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1059 if (!other)
1060 goto out;
1061
1062 /* Latch state of peer */
1063 unix_state_lock(other);
1064
1065 /* Apparently VFS overslept socket death. Retry. */
1066 if (sock_flag(other, SOCK_DEAD)) {
1067 unix_state_unlock(other);
1068 sock_put(other);
1069 goto restart;
1070 }
1071
1072 err = -ECONNREFUSED;
1073 if (other->sk_state != TCP_LISTEN)
1074 goto out_unlock;
1075
1076 if (unix_recvq_full(other)) {
1077 err = -EAGAIN;
1078 if (!timeo)
1079 goto out_unlock;
1080
1081 timeo = unix_wait_for_peer(other, timeo);
1082
1083 err = sock_intr_errno(timeo);
1084 if (signal_pending(current))
1085 goto out;
1086 sock_put(other);
1087 goto restart;
1088 }
1089
1090 /* Latch our state.
1091
1092 It is a tricky place. We need to grab our state lock and cannot
1093 drop the lock on the peer. It is dangerous because deadlock is
1094 possible. The connect-to-self case and simultaneous
1095 connect attempts are eliminated by checking the socket
1096 state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1097 check this before attempting to grab the lock.
1098
1099 Well, and we have to recheck the state after the socket is locked.
1100 */
1101 st = sk->sk_state;
1102
1103 switch (st) {
1104 case TCP_CLOSE:
1105 /* This is ok... continue with connect */
1106 break;
1107 case TCP_ESTABLISHED:
1108 /* Socket is already connected */
1109 err = -EISCONN;
1110 goto out_unlock;
1111 default:
1112 err = -EINVAL;
1113 goto out_unlock;
1114 }
1115
1116 unix_state_lock_nested(sk);
1117
1118 if (sk->sk_state != st) {
1119 unix_state_unlock(sk);
1120 unix_state_unlock(other);
1121 sock_put(other);
1122 goto restart;
1123 }
1124
1125 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1126 if (err) {
1127 unix_state_unlock(sk);
1128 goto out_unlock;
1129 }
1130
1131 /* The way is open! Quickly set all the necessary fields... */
1132
1133 sock_hold(sk);
1134 unix_peer(newsk) = sk;
1135 newsk->sk_state = TCP_ESTABLISHED;
1136 newsk->sk_type = sk->sk_type;
1137 newsk->sk_peercred.pid = task_tgid_vnr(current);
1138 newsk->sk_peercred.uid = current->euid;
1139 newsk->sk_peercred.gid = current->egid;
1140 newu = unix_sk(newsk);
1141 newsk->sk_sleep = &newu->peer_wait;
1142 otheru = unix_sk(other);
1143
1144 /* copy address information from listening to new sock*/
1145 if (otheru->addr) {
1146 atomic_inc(&otheru->addr->refcnt);
1147 newu->addr = otheru->addr;
1148 }
1149 if (otheru->dentry) {
1150 newu->dentry = dget(otheru->dentry);
1151 newu->mnt = mntget(otheru->mnt);
1152 }
1153
1154 /* Set credentials */
1155 sk->sk_peercred = other->sk_peercred;
1156
1157 sock->state = SS_CONNECTED;
1158 sk->sk_state = TCP_ESTABLISHED;
1159 sock_hold(newsk);
1160
1161 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1162 unix_peer(sk) = newsk;
1163
1164 unix_state_unlock(sk);
1165
1166 /* take ten and send info to listening sock */
1167 spin_lock(&other->sk_receive_queue.lock);
1168 __skb_queue_tail(&other->sk_receive_queue, skb);
1169 spin_unlock(&other->sk_receive_queue.lock);
1170 unix_state_unlock(other);
1171 other->sk_data_ready(other, 0);
1172 sock_put(other);
1173 return 0;
1174
1175 out_unlock:
1176 if (other)
1177 unix_state_unlock(other);
1178
1179 out:
1180 if (skb)
1181 kfree_skb(skb);
1182 if (newsk)
1183 unix_release_sock(newsk, 0);
1184 if (other)
1185 sock_put(other);
1186 return err;
1187 }
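
/*
 * Editor's sketch of the client side driving the function above. With a
 * blocking socket and a full accept backlog, connect() sleeps in
 * unix_wait_for_peer() instead of failing (path illustrative):
 *
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	strcpy(a.sun_path, "/tmp/demo.sock");
 *	if (connect(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("connect");	// e.g. ECONNREFUSED if nobody listens
 */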
1188
1189 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1190 {
1191 struct sock *ska = socka->sk, *skb = sockb->sk;
1192
1193 /* Join our sockets back to back */
1194 sock_hold(ska);
1195 sock_hold(skb);
1196 unix_peer(ska) = skb;
1197 unix_peer(skb) = ska;
1198 ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
1199 ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1200 ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1201
1202 if (ska->sk_type != SOCK_DGRAM) {
1203 ska->sk_state = TCP_ESTABLISHED;
1204 skb->sk_state = TCP_ESTABLISHED;
1205 socka->state = SS_CONNECTED;
1206 sockb->state = SS_CONNECTED;
1207 }
1208 return 0;
1209 }
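
/*
 * Editor's usage sketch: socketpair() lands in the function above and is
 * the usual way to get a connected pair for parent/child IPC:
 *
 *	int sv[2];
 *	char buf[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *	if (fork() == 0) {
 *		close(sv[0]);
 *		write(sv[1], "hi", 2);
 *		_exit(0);
 *	}
 *	close(sv[1]);
 *	read(sv[0], buf, 2);	// "hi"
 */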
1210
1211 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1212 {
1213 struct sock *sk = sock->sk;
1214 struct sock *tsk;
1215 struct sk_buff *skb;
1216 int err;
1217
1218 err = -EOPNOTSUPP;
1219 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1220 goto out;
1221
1222 err = -EINVAL;
1223 if (sk->sk_state != TCP_LISTEN)
1224 goto out;
1225
1226 /* If socket state is TCP_LISTEN it cannot change (for now...),
1227 * so no locks are necessary.
1228 */
1229
1230 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1231 if (!skb) {
1232 /* This means receive shutdown. */
1233 if (err == 0)
1234 err = -EINVAL;
1235 goto out;
1236 }
1237
1238 tsk = skb->sk;
1239 skb_free_datagram(sk, skb);
1240 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1241
1242 /* attach accepted sock to socket */
1243 unix_state_lock(tsk);
1244 newsock->state = SS_CONNECTED;
1245 sock_graft(tsk, newsock);
1246 unix_state_unlock(tsk);
1247 return 0;
1248
1249 out:
1250 return err;
1251 }
1252
1253
1254 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1255 {
1256 struct sock *sk = sock->sk;
1257 struct unix_sock *u;
1258 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1259 int err = 0;
1260
1261 if (peer) {
1262 sk = unix_peer_get(sk);
1263
1264 err = -ENOTCONN;
1265 if (!sk)
1266 goto out;
1267 err = 0;
1268 } else {
1269 sock_hold(sk);
1270 }
1271
1272 u = unix_sk(sk);
1273 unix_state_lock(sk);
1274 if (!u->addr) {
1275 sunaddr->sun_family = AF_UNIX;
1276 sunaddr->sun_path[0] = 0;
1277 *uaddr_len = sizeof(short);
1278 } else {
1279 struct unix_address *addr = u->addr;
1280
1281 *uaddr_len = addr->len;
1282 memcpy(sunaddr, addr->name, *uaddr_len);
1283 }
1284 unix_state_unlock(sk);
1285 sock_put(sk);
1286 out:
1287 return err;
1288 }
1289
1290 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1291 {
1292 int i;
1293
1294 scm->fp = UNIXCB(skb).fp;
1295 skb->destructor = sock_wfree;
1296 UNIXCB(skb).fp = NULL;
1297
1298 for (i = scm->fp->count-1; i >= 0; i--)
1299 unix_notinflight(scm->fp->fp[i]);
1300 }
1301
1302 static void unix_destruct_fds(struct sk_buff *skb)
1303 {
1304 struct scm_cookie scm;
1305 memset(&scm, 0, sizeof(scm));
1306 unix_detach_fds(&scm, skb);
1307
1308 /* Alas, it calls VFS */
1309 /* So fscking what? fput() had been SMP-safe since the last Summer */
1310 scm_destroy(&scm);
1311 sock_wfree(skb);
1312 }
1313
1314 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1315 {
1316 int i;
1317
1318 /*
1319 * Need to duplicate file references for the sake of garbage
1320 * collection. Otherwise a socket in the fps might become a
1321 * candidate for GC while the skb is not yet queued.
1322 */
1323 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1324 if (!UNIXCB(skb).fp)
1325 return -ENOMEM;
1326
1327 for (i = scm->fp->count-1; i >= 0; i--)
1328 unix_inflight(scm->fp->fp[i]);
1329 skb->destructor = unix_destruct_fds;
1330 return 0;
1331 }
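
/*
 * Editor's sketch of the userspace half that produces the scm->fp list
 * consumed above: sending one descriptor with SCM_RIGHTS. sock_fd and
 * fd_to_pass are hypothetical names; error handling omitted.
 *
 *	char dummy = 'x';
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	char ctl[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_RIGHTS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */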
1332
1333 /*
1334 * Send AF_UNIX data.
1335 */
1336
1337 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1338 struct msghdr *msg, size_t len)
1339 {
1340 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1341 struct sock *sk = sock->sk;
1342 struct net *net = sock_net(sk);
1343 struct unix_sock *u = unix_sk(sk);
1344 struct sockaddr_un *sunaddr = msg->msg_name;
1345 struct sock *other = NULL;
1346 int namelen = 0; /* fake GCC */
1347 int err;
1348 unsigned hash;
1349 struct sk_buff *skb;
1350 long timeo;
1351 struct scm_cookie tmp_scm;
1352
1353 if (NULL == siocb->scm)
1354 siocb->scm = &tmp_scm;
1355 err = scm_send(sock, msg, siocb->scm);
1356 if (err < 0)
1357 return err;
1358
1359 err = -EOPNOTSUPP;
1360 if (msg->msg_flags&MSG_OOB)
1361 goto out;
1362
1363 if (msg->msg_namelen) {
1364 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1365 if (err < 0)
1366 goto out;
1367 namelen = err;
1368 } else {
1369 sunaddr = NULL;
1370 err = -ENOTCONN;
1371 other = unix_peer_get(sk);
1372 if (!other)
1373 goto out;
1374 }
1375
1376 if (test_bit(SOCK_PASSCRED, &sock->flags)
1377 && !u->addr && (err = unix_autobind(sock)) != 0)
1378 goto out;
1379
1380 err = -EMSGSIZE;
1381 if (len > sk->sk_sndbuf - 32)
1382 goto out;
1383
1384 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1385 if (skb == NULL)
1386 goto out;
1387
1388 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1389 if (siocb->scm->fp) {
1390 err = unix_attach_fds(siocb->scm, skb);
1391 if (err)
1392 goto out_free;
1393 }
1394 unix_get_secdata(siocb->scm, skb);
1395
1396 skb_reset_transport_header(skb);
1397 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1398 if (err)
1399 goto out_free;
1400
1401 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1402
1403 restart:
1404 if (!other) {
1405 err = -ECONNRESET;
1406 if (sunaddr == NULL)
1407 goto out_free;
1408
1409 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1410 hash, &err);
1411 if (other == NULL)
1412 goto out_free;
1413 }
1414
1415 unix_state_lock(other);
1416 err = -EPERM;
1417 if (!unix_may_send(sk, other))
1418 goto out_unlock;
1419
1420 if (sock_flag(other, SOCK_DEAD)) {
1421 /*
1422 * Check with 1003.1g - what should
1423 * a datagram error return here?
1424 */
1425 unix_state_unlock(other);
1426 sock_put(other);
1427
1428 err = 0;
1429 unix_state_lock(sk);
1430 if (unix_peer(sk) == other) {
1431 unix_peer(sk) = NULL;
1432 unix_state_unlock(sk);
1433
1434 unix_dgram_disconnected(sk, other);
1435 sock_put(other);
1436 err = -ECONNREFUSED;
1437 } else {
1438 unix_state_unlock(sk);
1439 }
1440
1441 other = NULL;
1442 if (err)
1443 goto out_free;
1444 goto restart;
1445 }
1446
1447 err = -EPIPE;
1448 if (other->sk_shutdown & RCV_SHUTDOWN)
1449 goto out_unlock;
1450
1451 if (sk->sk_type != SOCK_SEQPACKET) {
1452 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1453 if (err)
1454 goto out_unlock;
1455 }
1456
1457 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1458 if (!timeo) {
1459 err = -EAGAIN;
1460 goto out_unlock;
1461 }
1462
1463 timeo = unix_wait_for_peer(other, timeo);
1464
1465 err = sock_intr_errno(timeo);
1466 if (signal_pending(current))
1467 goto out_free;
1468
1469 goto restart;
1470 }
1471
1472 skb_queue_tail(&other->sk_receive_queue, skb);
1473 unix_state_unlock(other);
1474 other->sk_data_ready(other, len);
1475 sock_put(other);
1476 scm_destroy(siocb->scm);
1477 return len;
1478
1479 out_unlock:
1480 unix_state_unlock(other);
1481 out_free:
1482 kfree_skb(skb);
1483 out:
1484 if (other)
1485 sock_put(other);
1486 scm_destroy(siocb->scm);
1487 return err;
1488 }
1489
1490
1491 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1492 struct msghdr *msg, size_t len)
1493 {
1494 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1495 struct sock *sk = sock->sk;
1496 struct sock *other = NULL;
1497 struct sockaddr_un *sunaddr = msg->msg_name;
1498 int err, size;
1499 struct sk_buff *skb;
1500 int sent = 0;
1501 struct scm_cookie tmp_scm;
1502
1503 if (NULL == siocb->scm)
1504 siocb->scm = &tmp_scm;
1505 err = scm_send(sock, msg, siocb->scm);
1506 if (err < 0)
1507 return err;
1508
1509 err = -EOPNOTSUPP;
1510 if (msg->msg_flags&MSG_OOB)
1511 goto out_err;
1512
1513 if (msg->msg_namelen) {
1514 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1515 goto out_err;
1516 } else {
1517 sunaddr = NULL;
1518 err = -ENOTCONN;
1519 other = unix_peer(sk);
1520 if (!other)
1521 goto out_err;
1522 }
1523
1524 if (sk->sk_shutdown & SEND_SHUTDOWN)
1525 goto pipe_err;
1526
1527 while (sent < len) {
1528 /*
1529 * Optimisation for the fact that under 0.01% of X
1530 * messages typically need breaking up.
1531 */
1532
1533 size = len-sent;
1534
1535 /* Keep two messages in the pipe so it schedules better */
1536 if (size > ((sk->sk_sndbuf >> 1) - 64))
1537 size = (sk->sk_sndbuf >> 1) - 64;
1538
1539 if (size > SKB_MAX_ALLOC)
1540 size = SKB_MAX_ALLOC;
1541
1542 /*
1543 * Grab a buffer
1544 */
1545
1546 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1547 &err);
1548
1549 if (skb == NULL)
1550 goto out_err;
1551
1552 /*
1553 * If you pass two values to sock_alloc_send_skb
1554 * it tries to grab the large buffer with GFP_NOFS
1555 * (which can fail easily), and if it fails it grabs the
1556 * fallback size buffer which is under a page and will
1557 * succeed. [Alan]
1558 */
1559 size = min_t(int, size, skb_tailroom(skb));
1560
1561 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1562 if (siocb->scm->fp) {
1563 err = unix_attach_fds(siocb->scm, skb);
1564 if (err) {
1565 kfree_skb(skb);
1566 goto out_err;
1567 }
1568 }
1569
1570 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1571 if (err) {
1572 kfree_skb(skb);
1573 goto out_err;
1574 }
1575
1576 unix_state_lock(other);
1577
1578 if (sock_flag(other, SOCK_DEAD) ||
1579 (other->sk_shutdown & RCV_SHUTDOWN))
1580 goto pipe_err_free;
1581
1582 skb_queue_tail(&other->sk_receive_queue, skb);
1583 unix_state_unlock(other);
1584 other->sk_data_ready(other, size);
1585 sent += size;
1586 }
1587
1588 scm_destroy(siocb->scm);
1589 siocb->scm = NULL;
1590
1591 return sent;
1592
1593 pipe_err_free:
1594 unix_state_unlock(other);
1595 kfree_skb(skb);
1596 pipe_err:
1597 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1598 send_sig(SIGPIPE, current, 0);
1599 err = -EPIPE;
1600 out_err:
1601 scm_destroy(siocb->scm);
1602 siocb->scm = NULL;
1603 return sent ? : err;
1604 }
1605
1606 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1607 struct msghdr *msg, size_t len)
1608 {
1609 int err;
1610 struct sock *sk = sock->sk;
1611
1612 err = sock_error(sk);
1613 if (err)
1614 return err;
1615
1616 if (sk->sk_state != TCP_ESTABLISHED)
1617 return -ENOTCONN;
1618
1619 if (msg->msg_namelen)
1620 msg->msg_namelen = 0;
1621
1622 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1623 }
1624
1625 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1626 {
1627 struct unix_sock *u = unix_sk(sk);
1628
1629 msg->msg_namelen = 0;
1630 if (u->addr) {
1631 msg->msg_namelen = u->addr->len;
1632 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1633 }
1634 }
1635
1636 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1637 struct msghdr *msg, size_t size,
1638 int flags)
1639 {
1640 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1641 struct scm_cookie tmp_scm;
1642 struct sock *sk = sock->sk;
1643 struct unix_sock *u = unix_sk(sk);
1644 int noblock = flags & MSG_DONTWAIT;
1645 struct sk_buff *skb;
1646 int err;
1647
1648 err = -EOPNOTSUPP;
1649 if (flags&MSG_OOB)
1650 goto out;
1651
1652 msg->msg_namelen = 0;
1653
1654 mutex_lock(&u->readlock);
1655
1656 skb = skb_recv_datagram(sk, flags, noblock, &err);
1657 if (!skb) {
1658 unix_state_lock(sk);
1659 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1660 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1661 (sk->sk_shutdown & RCV_SHUTDOWN))
1662 err = 0;
1663 unix_state_unlock(sk);
1664 goto out_unlock;
1665 }
1666
1667 wake_up_interruptible_sync(&u->peer_wait);
1668
1669 if (msg->msg_name)
1670 unix_copy_addr(msg, skb->sk);
1671
1672 if (size > skb->len)
1673 size = skb->len;
1674 else if (size < skb->len)
1675 msg->msg_flags |= MSG_TRUNC;
1676
1677 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1678 if (err)
1679 goto out_free;
1680
1681 if (!siocb->scm) {
1682 siocb->scm = &tmp_scm;
1683 memset(&tmp_scm, 0, sizeof(tmp_scm));
1684 }
1685 siocb->scm->creds = *UNIXCREDS(skb);
1686 unix_set_secdata(siocb->scm, skb);
1687
1688 if (!(flags & MSG_PEEK)) {
1689 if (UNIXCB(skb).fp)
1690 unix_detach_fds(siocb->scm, skb);
1691 } else {
1692 /* It is questionable: on PEEK we could:
1693 - not return fds - good, but too simple 8)
1694 - return fds, and not return them on read (the old strategy,
1695 apparently wrong)
1696 - clone fds (I chose this for now; it is the most universal
1697 solution)
1698
1699 POSIX 1003.1g does not actually define this clearly
1700 at all. POSIX 1003.1g doesn't define a lot of things
1701 clearly however!
1702
1703 */
1704 if (UNIXCB(skb).fp)
1705 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1706 }
1707 err = size;
1708
1709 scm_recv(sock, msg, siocb->scm, flags);
1710
1711 out_free:
1712 skb_free_datagram(sk, skb);
1713 out_unlock:
1714 mutex_unlock(&u->readlock);
1715 out:
1716 return err;
1717 }
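
/*
 * Editor's sketch of the matching receive side; unix_detach_fds() above
 * is what turns the queued file references back into the SCM_RIGHTS
 * control message this userspace code (sock_fd hypothetical) unpacks:
 *
 *	char dummy;
 *	int newfd = -1;
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	char ctl[CMSG_SPACE(sizeof(int))];
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sock_fd, &msg, 0);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	if (cm && cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_RIGHTS)
 *		memcpy(&newfd, CMSG_DATA(cm), sizeof(int));
 */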
1718
1719 /*
1720 * Sleep until data has arrived. But check for races.
1721 */
1722
1723 static long unix_stream_data_wait(struct sock *sk, long timeo)
1724 {
1725 DEFINE_WAIT(wait);
1726
1727 unix_state_lock(sk);
1728
1729 for (;;) {
1730 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1731
1732 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1733 sk->sk_err ||
1734 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1735 signal_pending(current) ||
1736 !timeo)
1737 break;
1738
1739 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1740 unix_state_unlock(sk);
1741 timeo = schedule_timeout(timeo);
1742 unix_state_lock(sk);
1743 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1744 }
1745
1746 finish_wait(sk->sk_sleep, &wait);
1747 unix_state_unlock(sk);
1748 return timeo;
1749 }
1750
1751
1752
1753 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1754 struct msghdr *msg, size_t size,
1755 int flags)
1756 {
1757 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1758 struct scm_cookie tmp_scm;
1759 struct sock *sk = sock->sk;
1760 struct unix_sock *u = unix_sk(sk);
1761 struct sockaddr_un *sunaddr = msg->msg_name;
1762 int copied = 0;
1763 int check_creds = 0;
1764 int target;
1765 int err = 0;
1766 long timeo;
1767
1768 err = -EINVAL;
1769 if (sk->sk_state != TCP_ESTABLISHED)
1770 goto out;
1771
1772 err = -EOPNOTSUPP;
1773 if (flags&MSG_OOB)
1774 goto out;
1775
1776 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1777 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1778
1779 msg->msg_namelen = 0;
1780
1781 /* Lock the socket to prevent queue disordering
1782 * while we sleep copying into the message iovec
1783 */
1784
1785 if (!siocb->scm) {
1786 siocb->scm = &tmp_scm;
1787 memset(&tmp_scm, 0, sizeof(tmp_scm));
1788 }
1789
1790 mutex_lock(&u->readlock);
1791
1792 do {
1793 int chunk;
1794 struct sk_buff *skb;
1795
1796 unix_state_lock(sk);
1797 skb = skb_dequeue(&sk->sk_receive_queue);
1798 if (skb == NULL) {
1799 if (copied >= target)
1800 goto unlock;
1801
1802 /*
1803 * POSIX 1003.1g mandates this order.
1804 */
1805
1806 err = sock_error(sk);
1807 if (err)
1808 goto unlock;
1809 if (sk->sk_shutdown & RCV_SHUTDOWN)
1810 goto unlock;
1811
1812 unix_state_unlock(sk);
1813 err = -EAGAIN;
1814 if (!timeo)
1815 break;
1816 mutex_unlock(&u->readlock);
1817
1818 timeo = unix_stream_data_wait(sk, timeo);
1819
1820 if (signal_pending(current)) {
1821 err = sock_intr_errno(timeo);
1822 goto out;
1823 }
1824 mutex_lock(&u->readlock);
1825 continue;
1826 unlock:
1827 unix_state_unlock(sk);
1828 break;
1829 }
1830 unix_state_unlock(sk);
1831
1832 if (check_creds) {
1833 /* Never glue messages from different writers */
1834 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds,
1835 sizeof(siocb->scm->creds)) != 0) {
1836 skb_queue_head(&sk->sk_receive_queue, skb);
1837 break;
1838 }
1839 } else {
1840 /* Copy credentials */
1841 siocb->scm->creds = *UNIXCREDS(skb);
1842 check_creds = 1;
1843 }
1844
1845 /* Copy address just once */
1846 if (sunaddr) {
1847 unix_copy_addr(msg, skb->sk);
1848 sunaddr = NULL;
1849 }
1850
1851 chunk = min_t(unsigned int, skb->len, size);
1852 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1853 skb_queue_head(&sk->sk_receive_queue, skb);
1854 if (copied == 0)
1855 copied = -EFAULT;
1856 break;
1857 }
1858 copied += chunk;
1859 size -= chunk;
1860
1861 /* Mark read part of skb as used */
1862 if (!(flags & MSG_PEEK)) {
1863 skb_pull(skb, chunk);
1864
1865 if (UNIXCB(skb).fp)
1866 unix_detach_fds(siocb->scm, skb);
1867
1868 /* put the skb back if we didn't use it up.. */
1869 if (skb->len) {
1870 skb_queue_head(&sk->sk_receive_queue, skb);
1871 break;
1872 }
1873
1874 kfree_skb(skb);
1875
1876 if (siocb->scm->fp)
1877 break;
1878 } else {
1879 /* It is questionable, see note in unix_dgram_recvmsg.
1880 */
1881 if (UNIXCB(skb).fp)
1882 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1883
1884 /* put message back and return */
1885 skb_queue_head(&sk->sk_receive_queue, skb);
1886 break;
1887 }
1888 } while (size);
1889
1890 mutex_unlock(&u->readlock);
1891 scm_recv(sock, msg, siocb->scm, flags);
1892 out:
1893 return copied ? : err;
1894 }
1895
1896 static int unix_shutdown(struct socket *sock, int mode)
1897 {
1898 struct sock *sk = sock->sk;
1899 struct sock *other;
1900
1901 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1902
1903 if (mode) {
1904 unix_state_lock(sk);
1905 sk->sk_shutdown |= mode;
1906 other = unix_peer(sk);
1907 if (other)
1908 sock_hold(other);
1909 unix_state_unlock(sk);
1910 sk->sk_state_change(sk);
1911
1912 if (other &&
1913 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1914
1915 int peer_mode = 0;
1916
1917 if (mode&RCV_SHUTDOWN)
1918 peer_mode |= SEND_SHUTDOWN;
1919 if (mode&SEND_SHUTDOWN)
1920 peer_mode |= RCV_SHUTDOWN;
1921 unix_state_lock(other);
1922 other->sk_shutdown |= peer_mode;
1923 unix_state_unlock(other);
1924 other->sk_state_change(other);
1925 read_lock(&other->sk_callback_lock);
1926 if (peer_mode == SHUTDOWN_MASK)
1927 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1928 else if (peer_mode & RCV_SHUTDOWN)
1929 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1930 read_unlock(&other->sk_callback_lock);
1931 }
1932 if (other)
1933 sock_put(other);
1934 }
1935 return 0;
1936 }
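
/*
 * Editor's note on the mode arithmetic above, assuming the usual
 * RCV_SHUTDOWN == 1 and SEND_SHUTDOWN == 2 from <net/sock.h>:
 *
 *	SHUT_RD   (0) -> (0 + 1) & 3 == RCV_SHUTDOWN
 *	SHUT_WR   (1) -> (1 + 1) & 3 == SEND_SHUTDOWN
 *	SHUT_RDWR (2) -> (2 + 1) & 3 == SHUTDOWN_MASK
 *
 * i.e. the userspace SHUT_* constants map onto the kernel mask in one
 * increment-and-mask step.
 */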
1937
1938 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1939 {
1940 struct sock *sk = sock->sk;
1941 long amount = 0;
1942 int err;
1943
1944 switch (cmd) {
1945 case SIOCOUTQ:
1946 amount = atomic_read(&sk->sk_wmem_alloc);
1947 err = put_user(amount, (int __user *)arg);
1948 break;
1949 case SIOCINQ:
1950 {
1951 struct sk_buff *skb;
1952
1953 if (sk->sk_state == TCP_LISTEN) {
1954 err = -EINVAL;
1955 break;
1956 }
1957
1958 spin_lock(&sk->sk_receive_queue.lock);
1959 if (sk->sk_type == SOCK_STREAM ||
1960 sk->sk_type == SOCK_SEQPACKET) {
1961 skb_queue_walk(&sk->sk_receive_queue, skb)
1962 amount += skb->len;
1963 } else {
1964 skb = skb_peek(&sk->sk_receive_queue);
1965 if (skb)
1966 amount = skb->len;
1967 }
1968 spin_unlock(&sk->sk_receive_queue.lock);
1969 err = put_user(amount, (int __user *)arg);
1970 break;
1971 }
1972
1973 default:
1974 err = -ENOIOCTLCMD;
1975 break;
1976 }
1977 return err;
1978 }
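
/*
 * Editor's usage sketch for the two ioctls handled above:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int unread, unsent;
 *
 *	ioctl(fd, SIOCINQ, &unread);	// bytes queued for reading
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes not yet consumed by peer
 */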
1979
1980 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
1981 {
1982 struct sock *sk = sock->sk;
1983 unsigned int mask;
1984
1985 poll_wait(file, sk->sk_sleep, wait);
1986 mask = 0;
1987
1988 /* exceptional events? */
1989 if (sk->sk_err)
1990 mask |= POLLERR;
1991 if (sk->sk_shutdown == SHUTDOWN_MASK)
1992 mask |= POLLHUP;
1993 if (sk->sk_shutdown & RCV_SHUTDOWN)
1994 mask |= POLLRDHUP;
1995
1996 /* readable? */
1997 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1998 (sk->sk_shutdown & RCV_SHUTDOWN))
1999 mask |= POLLIN | POLLRDNORM;
2000
2001 /* Connection-based need to check for termination and startup */
2002 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2003 sk->sk_state == TCP_CLOSE)
2004 mask |= POLLHUP;
2005
2006 /*
2007 * we set writable also when the other side has shut down the
2008 * connection. This prevents stuck sockets.
2009 */
2010 if (unix_writable(sk))
2011 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2012
2013 return mask;
2014 }
2015
2016 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2017 poll_table *wait)
2018 {
2019 struct sock *sk = sock->sk, *other;
2020 unsigned int mask, writable;
2021
2022 poll_wait(file, sk->sk_sleep, wait);
2023 mask = 0;
2024
2025 /* exceptional events? */
2026 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2027 mask |= POLLERR;
2028 if (sk->sk_shutdown & RCV_SHUTDOWN)
2029 mask |= POLLRDHUP;
2030 if (sk->sk_shutdown == SHUTDOWN_MASK)
2031 mask |= POLLHUP;
2032
2033 /* readable? */
2034 if (!skb_queue_empty(&sk->sk_receive_queue) ||
2035 (sk->sk_shutdown & RCV_SHUTDOWN))
2036 mask |= POLLIN | POLLRDNORM;
2037
2038 /* Connection-based need to check for termination and startup */
2039 if (sk->sk_type == SOCK_SEQPACKET) {
2040 if (sk->sk_state == TCP_CLOSE)
2041 mask |= POLLHUP;
2042 /* connection hasn't started yet? */
2043 if (sk->sk_state == TCP_SYN_SENT)
2044 return mask;
2045 }
2046
2047 /* writable? */
2048 writable = unix_writable(sk);
2049 if (writable) {
2050 other = unix_peer_get(sk);
2051 if (other) {
2052 if (unix_peer(other) != sk) {
2053 poll_wait(file, &unix_sk(other)->peer_wait,
2054 wait);
2055 if (unix_recvq_full(other))
2056 writable = 0;
2057 }
2058
2059 sock_put(other);
2060 }
2061 }
2062
2063 if (writable)
2064 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2065 else
2066 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2067
2068 return mask;
2069 }
2070
2071 #ifdef CONFIG_PROC_FS
2072 static struct sock *first_unix_socket(int *i)
2073 {
2074 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2075 if (!hlist_empty(&unix_socket_table[*i]))
2076 return __sk_head(&unix_socket_table[*i]);
2077 }
2078 return NULL;
2079 }
2080
2081 static struct sock *next_unix_socket(int *i, struct sock *s)
2082 {
2083 struct sock *next = sk_next(s);
2084 /* More in this chain? */
2085 if (next)
2086 return next;
2087 /* Look for next non-empty chain. */
2088 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2089 if (!hlist_empty(&unix_socket_table[*i]))
2090 return __sk_head(&unix_socket_table[*i]);
2091 }
2092 return NULL;
2093 }
2094
2095 struct unix_iter_state {
2096 struct seq_net_private p;
2097 int i;
2098 };
2099
2100 static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2101 {
2102 struct unix_iter_state *iter = seq->private;
2103 loff_t off = 0;
2104 struct sock *s;
2105
2106 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2107 if (sock_net(s) != seq_file_net(seq))
2108 continue;
2109 if (off == pos)
2110 return s;
2111 ++off;
2112 }
2113 return NULL;
2114 }
2115
2116 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2117 __acquires(unix_table_lock)
2118 {
2119 spin_lock(&unix_table_lock);
2120 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2121 }
2122
2123 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2124 {
2125 struct unix_iter_state *iter = seq->private;
2126 struct sock *sk = v;
2127 ++*pos;
2128
2129 if (v == SEQ_START_TOKEN)
2130 sk = first_unix_socket(&iter->i);
2131 else
2132 sk = next_unix_socket(&iter->i, sk);
2133 while (sk && (sock_net(sk) != seq_file_net(seq)))
2134 sk = next_unix_socket(&iter->i, sk);
2135 return sk;
2136 }
2137
2138 static void unix_seq_stop(struct seq_file *seq, void *v)
2139 __releases(unix_table_lock)
2140 {
2141 spin_unlock(&unix_table_lock);
2142 }
2143
2144 static int unix_seq_show(struct seq_file *seq, void *v)
2145 {
2146
2147 if (v == SEQ_START_TOKEN)
2148 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2149 "Inode Path\n");
2150 else {
2151 struct sock *s = v;
2152 struct unix_sock *u = unix_sk(s);
2153 unix_state_lock(s);
2154
2155 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2156 s,
2157 atomic_read(&s->sk_refcnt),
2158 0,
2159 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2160 s->sk_type,
2161 s->sk_socket ?
2162 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2163 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2164 sock_i_ino(s));
2165
2166 if (u->addr) {
2167 int i, len;
2168 seq_putc(seq, ' ');
2169
2170 i = 0;
2171 len = u->addr->len - sizeof(short);
2172 if (!UNIX_ABSTRACT(s))
2173 len--;
2174 else {
2175 seq_putc(seq, '@');
2176 i++;
2177 }
2178 for ( ; i < len; i++)
2179 seq_putc(seq, u->addr->name->sun_path[i]);
2180 }
2181 unix_state_unlock(s);
2182 seq_putc(seq, '\n');
2183 }
2184
2185 return 0;
2186 }
2187
2188 static const struct seq_operations unix_seq_ops = {
2189 .start = unix_seq_start,
2190 .next = unix_seq_next,
2191 .stop = unix_seq_stop,
2192 .show = unix_seq_show,
2193 };
2194
2195 static int unix_seq_open(struct inode *inode, struct file *file)
2196 {
2197 return seq_open_net(inode, file, &unix_seq_ops,
2198 sizeof(struct unix_iter_state));
2199 }
2200
2201 static const struct file_operations unix_seq_fops = {
2202 .owner = THIS_MODULE,
2203 .open = unix_seq_open,
2204 .read = seq_read,
2205 .llseek = seq_lseek,
2206 .release = seq_release_net,
2207 };
2208
2209 #endif
2210
2211 static struct net_proto_family unix_family_ops = {
2212 .family = PF_UNIX,
2213 .create = unix_create,
2214 .owner = THIS_MODULE,
2215 };
2216
2217
2218 static int unix_net_init(struct net *net)
2219 {
2220 int error = -ENOMEM;
2221
2222 net->unx.sysctl_max_dgram_qlen = 10;
2223 if (unix_sysctl_register(net))
2224 goto out;
2225
2226 #ifdef CONFIG_PROC_FS
2227 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2228 unix_sysctl_unregister(net);
2229 goto out;
2230 }
2231 #endif
2232 error = 0;
2233 out:
2234 return error;
2235 }
2236
2237 static void unix_net_exit(struct net *net)
2238 {
2239 unix_sysctl_unregister(net);
2240 proc_net_remove(net, "unix");
2241 }
2242
2243 static struct pernet_operations unix_net_ops = {
2244 .init = unix_net_init,
2245 .exit = unix_net_exit,
2246 };
2247
2248 static int __init af_unix_init(void)
2249 {
2250 int rc = -1;
2251 struct sk_buff *dummy_skb;
2252
2253 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2254
2255 rc = proto_register(&unix_proto, 1);
2256 if (rc != 0) {
2257 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2258 __func__);
2259 goto out;
2260 }
2261
2262 sock_register(&unix_family_ops);
2263 register_pernet_subsys(&unix_net_ops);
2264 out:
2265 return rc;
2266 }
2267
2268 static void __exit af_unix_exit(void)
2269 {
2270 sock_unregister(PF_UNIX);
2271 proto_unregister(&unix_proto);
2272 unregister_pernet_subsys(&unix_net_ops);
2273 }
2274
2275 /* Earlier than device_initcall() so that other drivers invoking
2276 request_module() don't end up in a loop when modprobe tries
2277 to use a UNIX socket. But later than subsys_initcall() because
2278 we depend on stuff initialised there. */
2279 fs_initcall(af_unix_init);
2280 module_exit(af_unix_exit);
2281
2282 MODULE_LICENSE("GPL");
2283 MODULE_ALIAS_NETPROTO(PF_UNIX);