unix: Support peeking offset for datagram and seqpacket sockets
net/unix/af_unix.c
1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. A lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, give the blksize as the high water mark,
59 * or give a fake inode identifier (nor does it have the BSD first-socket-fstat-twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * starting with a 0 byte, so that this name space does not
80 * intersect with BSD names.
81 */
82
83 #include <linux/module.h>
84 #include <linux/kernel.h>
85 #include <linux/signal.h>
86 #include <linux/sched.h>
87 #include <linux/errno.h>
88 #include <linux/string.h>
89 #include <linux/stat.h>
90 #include <linux/dcache.h>
91 #include <linux/namei.h>
92 #include <linux/socket.h>
93 #include <linux/un.h>
94 #include <linux/fcntl.h>
95 #include <linux/termios.h>
96 #include <linux/sockios.h>
97 #include <linux/net.h>
98 #include <linux/in.h>
99 #include <linux/fs.h>
100 #include <linux/slab.h>
101 #include <asm/uaccess.h>
102 #include <linux/skbuff.h>
103 #include <linux/netdevice.h>
104 #include <net/net_namespace.h>
105 #include <net/sock.h>
106 #include <net/tcp_states.h>
107 #include <net/af_unix.h>
108 #include <linux/proc_fs.h>
109 #include <linux/seq_file.h>
110 #include <net/scm.h>
111 #include <linux/init.h>
112 #include <linux/poll.h>
113 #include <linux/rtnetlink.h>
114 #include <linux/mount.h>
115 #include <net/checksum.h>
116 #include <linux/security.h>
117
118 struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119 EXPORT_SYMBOL_GPL(unix_socket_table);
120 DEFINE_SPINLOCK(unix_table_lock);
121 EXPORT_SYMBOL_GPL(unix_table_lock);
122 static atomic_long_t unix_nr_socks;
123
124 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
125
126 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
127
128 #ifdef CONFIG_SECURITY_NETWORK
129 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
130 {
131 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
132 }
133
134 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
135 {
136 scm->secid = *UNIXSID(skb);
137 }
138 #else
139 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
140 { }
141
142 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
143 { }
144 #endif /* CONFIG_SECURITY_NETWORK */
145
146 /*
147 * SMP locking strategy:
148 * the hash table is protected by the spinlock unix_table_lock;
149 * each socket's state is protected by a separate spin lock.
150 */
151
152 static inline unsigned unix_hash_fold(__wsum n)
153 {
154 unsigned hash = (__force unsigned)n;
155 hash ^= hash>>16;
156 hash ^= hash>>8;
157 return hash&(UNIX_HASH_SIZE-1);
158 }
159
160 #define unix_peer(sk) (unix_sk(sk)->peer)
161
162 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
163 {
164 return unix_peer(osk) == sk;
165 }
166
167 static inline int unix_may_send(struct sock *sk, struct sock *osk)
168 {
169 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
170 }
171
172 static inline int unix_recvq_full(struct sock const *sk)
173 {
174 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
175 }
176
177 struct sock *unix_peer_get(struct sock *s)
178 {
179 struct sock *peer;
180
181 unix_state_lock(s);
182 peer = unix_peer(s);
183 if (peer)
184 sock_hold(peer);
185 unix_state_unlock(s);
186 return peer;
187 }
188 EXPORT_SYMBOL_GPL(unix_peer_get);
189
190 static inline void unix_release_addr(struct unix_address *addr)
191 {
192 if (atomic_dec_and_test(&addr->refcnt))
193 kfree(addr);
194 }
195
196 /*
197 * Check unix socket name:
198 * - it should not be zero length.
199 * - if it does not start with a 0 byte, it should be NUL terminated (an FS object)
200 * - if it starts with a 0 byte, it is an abstract name.
201 */
202
203 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
204 {
205 if (len <= sizeof(short) || len > sizeof(*sunaddr))
206 return -EINVAL;
207 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
208 return -EINVAL;
209 if (sunaddr->sun_path[0]) {
210 /*
211 * This may look like an off by one error but it is a bit more
212 * subtle. 108 is the longest valid AF_UNIX path for a binding.
213 * sun_path[108] doesn't as such exist. However in kernel space
214 * we are guaranteed that it is a valid memory location in our
215 * kernel address buffer.
216 */
217 ((char *)sunaddr)[len] = 0;
218 len = strlen(sunaddr->sun_path)+1+sizeof(short);
219 return len;
220 }
221
222 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
223 return len;
224 }
225
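/*
 * Illustrative sketch, not part of the original source: how a
 * hypothetical userspace caller would build the two address forms
 * that unix_mkname() distinguishes above.
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *
 *	// Filesystem name: sun_path[0] != 0, NUL terminated.
 *	strcpy(a.sun_path, "/tmp/mysock");
 *	// len = offsetof(struct sockaddr_un, sun_path)
 *	//       + strlen(a.sun_path) + 1;
 *
 *	// Abstract name: sun_path[0] == 0, the remaining bytes are the
 *	// name (not NUL terminated); len covers exactly the bytes used.
 *	a.sun_path[0] = 0;
 *	memcpy(a.sun_path + 1, "myname", 6);
 *	// len = offsetof(struct sockaddr_un, sun_path) + 1 + 6;
 */
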
226 static void __unix_remove_socket(struct sock *sk)
227 {
228 sk_del_node_init(sk);
229 }
230
231 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
232 {
233 WARN_ON(!sk_unhashed(sk));
234 sk_add_node(sk, list);
235 }
236
237 static inline void unix_remove_socket(struct sock *sk)
238 {
239 spin_lock(&unix_table_lock);
240 __unix_remove_socket(sk);
241 spin_unlock(&unix_table_lock);
242 }
243
244 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
245 {
246 spin_lock(&unix_table_lock);
247 __unix_insert_socket(list, sk);
248 spin_unlock(&unix_table_lock);
249 }
250
251 static struct sock *__unix_find_socket_byname(struct net *net,
252 struct sockaddr_un *sunname,
253 int len, int type, unsigned hash)
254 {
255 struct sock *s;
256 struct hlist_node *node;
257
258 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
259 struct unix_sock *u = unix_sk(s);
260
261 if (!net_eq(sock_net(s), net))
262 continue;
263
264 if (u->addr->len == len &&
265 !memcmp(u->addr->name, sunname, len))
266 goto found;
267 }
268 s = NULL;
269 found:
270 return s;
271 }
272
273 static inline struct sock *unix_find_socket_byname(struct net *net,
274 struct sockaddr_un *sunname,
275 int len, int type,
276 unsigned hash)
277 {
278 struct sock *s;
279
280 spin_lock(&unix_table_lock);
281 s = __unix_find_socket_byname(net, sunname, len, type, hash);
282 if (s)
283 sock_hold(s);
284 spin_unlock(&unix_table_lock);
285 return s;
286 }
287
288 static struct sock *unix_find_socket_byinode(struct inode *i)
289 {
290 struct sock *s;
291 struct hlist_node *node;
292
293 spin_lock(&unix_table_lock);
294 sk_for_each(s, node,
295 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
296 struct dentry *dentry = unix_sk(s)->dentry;
297
298 if (dentry && dentry->d_inode == i) {
299 sock_hold(s);
300 goto found;
301 }
302 }
303 s = NULL;
304 found:
305 spin_unlock(&unix_table_lock);
306 return s;
307 }
308
309 static inline int unix_writable(struct sock *sk)
310 {
311 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
312 }
313
314 static void unix_write_space(struct sock *sk)
315 {
316 struct socket_wq *wq;
317
318 rcu_read_lock();
319 if (unix_writable(sk)) {
320 wq = rcu_dereference(sk->sk_wq);
321 if (wq_has_sleeper(wq))
322 wake_up_interruptible_sync_poll(&wq->wait,
323 POLLOUT | POLLWRNORM | POLLWRBAND);
324 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
325 }
326 rcu_read_unlock();
327 }
328
329 /* When a dgram socket disconnects (or changes its peer), we clear its receive
330 * queue of packets that arrived from the previous peer. First, this allows
331 * flow control to be based only on wmem_alloc; second, an sk connected to a
332 * peer may receive messages only from that peer. */
333 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
334 {
335 if (!skb_queue_empty(&sk->sk_receive_queue)) {
336 skb_queue_purge(&sk->sk_receive_queue);
337 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
338
339 /* If one link of a bidirectional dgram pipe is disconnected,
340 * we signal an error. Messages are lost. Do not do this
341 * when the peer was not connected to us.
342 */
343 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
344 other->sk_err = ECONNRESET;
345 other->sk_error_report(other);
346 }
347 }
348 }
349
350 static void unix_sock_destructor(struct sock *sk)
351 {
352 struct unix_sock *u = unix_sk(sk);
353
354 skb_queue_purge(&sk->sk_receive_queue);
355
356 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
357 WARN_ON(!sk_unhashed(sk));
358 WARN_ON(sk->sk_socket);
359 if (!sock_flag(sk, SOCK_DEAD)) {
360 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
361 return;
362 }
363
364 if (u->addr)
365 unix_release_addr(u->addr);
366
367 atomic_long_dec(&unix_nr_socks);
368 local_bh_disable();
369 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
370 local_bh_enable();
371 #ifdef UNIX_REFCNT_DEBUG
372 printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
373 atomic_long_read(&unix_nr_socks));
374 #endif
375 }
376
377 static int unix_release_sock(struct sock *sk, int embrion)
378 {
379 struct unix_sock *u = unix_sk(sk);
380 struct dentry *dentry;
381 struct vfsmount *mnt;
382 struct sock *skpair;
383 struct sk_buff *skb;
384 int state;
385
386 unix_remove_socket(sk);
387
388 /* Clear state */
389 unix_state_lock(sk);
390 sock_orphan(sk);
391 sk->sk_shutdown = SHUTDOWN_MASK;
392 dentry = u->dentry;
393 u->dentry = NULL;
394 mnt = u->mnt;
395 u->mnt = NULL;
396 state = sk->sk_state;
397 sk->sk_state = TCP_CLOSE;
398 unix_state_unlock(sk);
399
400 wake_up_interruptible_all(&u->peer_wait);
401
402 skpair = unix_peer(sk);
403
404 if (skpair != NULL) {
405 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
406 unix_state_lock(skpair);
407 /* No more writes */
408 skpair->sk_shutdown = SHUTDOWN_MASK;
409 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
410 skpair->sk_err = ECONNRESET;
411 unix_state_unlock(skpair);
412 skpair->sk_state_change(skpair);
413 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
414 }
415 sock_put(skpair); /* It may now die */
416 unix_peer(sk) = NULL;
417 }
418
419 /* Try to flush out this socket. Throw out buffers at least */
420
421 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
422 if (state == TCP_LISTEN)
423 unix_release_sock(skb->sk, 1);
424 /* passed fds are erased in the kfree_skb hook */
425 kfree_skb(skb);
426 }
427
428 if (dentry) {
429 dput(dentry);
430 mntput(mnt);
431 }
432
433 sock_put(sk);
434
435 /* ---- Socket is dead now and most probably destroyed ---- */
436
437 /*
438 * Fixme: BSD difference: In BSD all sockets connected to us get
439 * ECONNRESET and we die on the spot. In Linux we behave
440 * like files and pipes do and wait for the last
441 * dereference.
442 *
443 * Can't we simply set sock->err?
444 *
445 * What does the above comment talk about? --ANK(980817)
446 */
447
448 if (unix_tot_inflight)
449 unix_gc(); /* Garbage collect fds */
450
451 return 0;
452 }
453
454 static void init_peercred(struct sock *sk)
455 {
456 put_pid(sk->sk_peer_pid);
457 if (sk->sk_peer_cred)
458 put_cred(sk->sk_peer_cred);
459 sk->sk_peer_pid = get_pid(task_tgid(current));
460 sk->sk_peer_cred = get_current_cred();
461 }
462
463 static void copy_peercred(struct sock *sk, struct sock *peersk)
464 {
465 put_pid(sk->sk_peer_pid);
466 if (sk->sk_peer_cred)
467 put_cred(sk->sk_peer_cred);
468 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
469 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
470 }
471
472 static int unix_listen(struct socket *sock, int backlog)
473 {
474 int err;
475 struct sock *sk = sock->sk;
476 struct unix_sock *u = unix_sk(sk);
477 struct pid *old_pid = NULL;
478 const struct cred *old_cred = NULL;
479
480 err = -EOPNOTSUPP;
481 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
482 goto out; /* Only stream/seqpacket sockets accept */
483 err = -EINVAL;
484 if (!u->addr)
485 goto out; /* No listens on an unbound socket */
486 unix_state_lock(sk);
487 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
488 goto out_unlock;
489 if (backlog > sk->sk_max_ack_backlog)
490 wake_up_interruptible_all(&u->peer_wait);
491 sk->sk_max_ack_backlog = backlog;
492 sk->sk_state = TCP_LISTEN;
493 /* set credentials so connect can copy them */
494 init_peercred(sk);
495 err = 0;
496
497 out_unlock:
498 unix_state_unlock(sk);
499 put_pid(old_pid);
500 if (old_cred)
501 put_cred(old_cred);
502 out:
503 return err;
504 }
505
506 static int unix_release(struct socket *);
507 static int unix_bind(struct socket *, struct sockaddr *, int);
508 static int unix_stream_connect(struct socket *, struct sockaddr *,
509 int addr_len, int flags);
510 static int unix_socketpair(struct socket *, struct socket *);
511 static int unix_accept(struct socket *, struct socket *, int);
512 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
513 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
514 static unsigned int unix_dgram_poll(struct file *, struct socket *,
515 poll_table *);
516 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
517 static int unix_shutdown(struct socket *, int);
518 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
519 struct msghdr *, size_t);
520 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
521 struct msghdr *, size_t, int);
522 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
523 struct msghdr *, size_t);
524 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
525 struct msghdr *, size_t, int);
526 static int unix_dgram_connect(struct socket *, struct sockaddr *,
527 int, int);
528 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
529 struct msghdr *, size_t);
530 static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
531 struct msghdr *, size_t, int);
532
533 static void unix_set_peek_off(struct sock *sk, int val)
534 {
535 struct unix_sock *u = unix_sk(sk);
536
537 mutex_lock(&u->readlock);
538 sk->sk_peek_off = val;
539 mutex_unlock(&u->readlock);
540 }
541
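/*
 * Illustrative sketch (userspace, not part of this file): with the
 * set_peek_off hook installed in the dgram/seqpacket ops below, a
 * receiver can walk a queued message with a sliding MSG_PEEK window:
 *
 *	int off = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 16..31
 *	recv(fd, buf, 64, 0);		// a normal read consumes data
 *					// and rewinds the peek offset
 */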
542
543 static const struct proto_ops unix_stream_ops = {
544 .family = PF_UNIX,
545 .owner = THIS_MODULE,
546 .release = unix_release,
547 .bind = unix_bind,
548 .connect = unix_stream_connect,
549 .socketpair = unix_socketpair,
550 .accept = unix_accept,
551 .getname = unix_getname,
552 .poll = unix_poll,
553 .ioctl = unix_ioctl,
554 .listen = unix_listen,
555 .shutdown = unix_shutdown,
556 .setsockopt = sock_no_setsockopt,
557 .getsockopt = sock_no_getsockopt,
558 .sendmsg = unix_stream_sendmsg,
559 .recvmsg = unix_stream_recvmsg,
560 .mmap = sock_no_mmap,
561 .sendpage = sock_no_sendpage,
562 };
563
564 static const struct proto_ops unix_dgram_ops = {
565 .family = PF_UNIX,
566 .owner = THIS_MODULE,
567 .release = unix_release,
568 .bind = unix_bind,
569 .connect = unix_dgram_connect,
570 .socketpair = unix_socketpair,
571 .accept = sock_no_accept,
572 .getname = unix_getname,
573 .poll = unix_dgram_poll,
574 .ioctl = unix_ioctl,
575 .listen = sock_no_listen,
576 .shutdown = unix_shutdown,
577 .setsockopt = sock_no_setsockopt,
578 .getsockopt = sock_no_getsockopt,
579 .sendmsg = unix_dgram_sendmsg,
580 .recvmsg = unix_dgram_recvmsg,
581 .mmap = sock_no_mmap,
582 .sendpage = sock_no_sendpage,
583 .set_peek_off = unix_set_peek_off,
584 };
585
586 static const struct proto_ops unix_seqpacket_ops = {
587 .family = PF_UNIX,
588 .owner = THIS_MODULE,
589 .release = unix_release,
590 .bind = unix_bind,
591 .connect = unix_stream_connect,
592 .socketpair = unix_socketpair,
593 .accept = unix_accept,
594 .getname = unix_getname,
595 .poll = unix_dgram_poll,
596 .ioctl = unix_ioctl,
597 .listen = unix_listen,
598 .shutdown = unix_shutdown,
599 .setsockopt = sock_no_setsockopt,
600 .getsockopt = sock_no_getsockopt,
601 .sendmsg = unix_seqpacket_sendmsg,
602 .recvmsg = unix_seqpacket_recvmsg,
603 .mmap = sock_no_mmap,
604 .sendpage = sock_no_sendpage,
605 .set_peek_off = unix_set_peek_off,
606 };
607
608 static struct proto unix_proto = {
609 .name = "UNIX",
610 .owner = THIS_MODULE,
611 .obj_size = sizeof(struct unix_sock),
612 };
613
614 /*
615 * AF_UNIX sockets do not interact with hardware, hence they
616 * don't trigger interrupts - so it's safe for them to have
617 * bh-unsafe locking for their sk_receive_queue.lock. Split off
618 * this special lock-class by reinitializing the spinlock key:
619 */
620 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
621
622 static struct sock *unix_create1(struct net *net, struct socket *sock)
623 {
624 struct sock *sk = NULL;
625 struct unix_sock *u;
626
627 atomic_long_inc(&unix_nr_socks);
628 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
629 goto out;
630
631 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
632 if (!sk)
633 goto out;
634
635 sock_init_data(sock, sk);
636 lockdep_set_class(&sk->sk_receive_queue.lock,
637 &af_unix_sk_receive_queue_lock_key);
638
639 sk->sk_write_space = unix_write_space;
640 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
641 sk->sk_destruct = unix_sock_destructor;
642 u = unix_sk(sk);
643 u->dentry = NULL;
644 u->mnt = NULL;
645 spin_lock_init(&u->lock);
646 atomic_long_set(&u->inflight, 0);
647 INIT_LIST_HEAD(&u->link);
648 mutex_init(&u->readlock); /* single task reading lock */
649 init_waitqueue_head(&u->peer_wait);
650 unix_insert_socket(unix_sockets_unbound, sk);
651 out:
652 if (sk == NULL)
653 atomic_long_dec(&unix_nr_socks);
654 else {
655 local_bh_disable();
656 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
657 local_bh_enable();
658 }
659 return sk;
660 }
661
662 static int unix_create(struct net *net, struct socket *sock, int protocol,
663 int kern)
664 {
665 if (protocol && protocol != PF_UNIX)
666 return -EPROTONOSUPPORT;
667
668 sock->state = SS_UNCONNECTED;
669
670 switch (sock->type) {
671 case SOCK_STREAM:
672 sock->ops = &unix_stream_ops;
673 break;
674 /*
675 * Believe it or not, BSD has AF_UNIX SOCK_RAW, though
676 * nothing uses it.
677 */
678 case SOCK_RAW:
679 sock->type = SOCK_DGRAM;
680 case SOCK_DGRAM:
681 sock->ops = &unix_dgram_ops;
682 break;
683 case SOCK_SEQPACKET:
684 sock->ops = &unix_seqpacket_ops;
685 break;
686 default:
687 return -ESOCKTNOSUPPORT;
688 }
689
690 return unix_create1(net, sock) ? 0 : -ENOMEM;
691 }
692
693 static int unix_release(struct socket *sock)
694 {
695 struct sock *sk = sock->sk;
696
697 if (!sk)
698 return 0;
699
700 sock->sk = NULL;
701
702 return unix_release_sock(sk, 0);
703 }
704
705 static int unix_autobind(struct socket *sock)
706 {
707 struct sock *sk = sock->sk;
708 struct net *net = sock_net(sk);
709 struct unix_sock *u = unix_sk(sk);
710 static u32 ordernum = 1;
711 struct unix_address *addr;
712 int err;
713 unsigned int retries = 0;
714
715 mutex_lock(&u->readlock);
716
717 err = 0;
718 if (u->addr)
719 goto out;
720
721 err = -ENOMEM;
722 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
723 if (!addr)
724 goto out;
725
726 addr->name->sun_family = AF_UNIX;
727 atomic_set(&addr->refcnt, 1);
728
729 retry:
730 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
731 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
732
733 spin_lock(&unix_table_lock);
734 ordernum = (ordernum+1)&0xFFFFF;
735
736 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
737 addr->hash)) {
738 spin_unlock(&unix_table_lock);
739 /*
740 * __unix_find_socket_byname() may take a long time if many names
741 * are already in use.
742 */
743 cond_resched();
744 /* Give up if all names seem to be in use. */
745 if (retries++ == 0xFFFFF) {
746 err = -ENOSPC;
747 kfree(addr);
748 goto out;
749 }
750 goto retry;
751 }
752 addr->hash ^= sk->sk_type;
753
754 __unix_remove_socket(sk);
755 u->addr = addr;
756 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
757 spin_unlock(&unix_table_lock);
758 err = 0;
759
760 out: mutex_unlock(&u->readlock);
761 return err;
762 }
763
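/*
 * Illustrative sketch (userspace): autobind above is triggered by
 * binding with nothing but the address family, which assigns an
 * abstract "\0XXXXX" name of five hex digits:
 *
 *	struct sockaddr_un a = { .sun_family = AF_UNIX };
 *	bind(fd, (struct sockaddr *)&a, sizeof(sa_family_t));
 */
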
764 static struct sock *unix_find_other(struct net *net,
765 struct sockaddr_un *sunname, int len,
766 int type, unsigned hash, int *error)
767 {
768 struct sock *u;
769 struct path path;
770 int err = 0;
771
772 if (sunname->sun_path[0]) {
773 struct inode *inode;
774 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
775 if (err)
776 goto fail;
777 inode = path.dentry->d_inode;
778 err = inode_permission(inode, MAY_WRITE);
779 if (err)
780 goto put_fail;
781
782 err = -ECONNREFUSED;
783 if (!S_ISSOCK(inode->i_mode))
784 goto put_fail;
785 u = unix_find_socket_byinode(inode);
786 if (!u)
787 goto put_fail;
788
789 if (u->sk_type == type)
790 touch_atime(path.mnt, path.dentry);
791
792 path_put(&path);
793
794 err = -EPROTOTYPE;
795 if (u->sk_type != type) {
796 sock_put(u);
797 goto fail;
798 }
799 } else {
800 err = -ECONNREFUSED;
801 u = unix_find_socket_byname(net, sunname, len, type, hash);
802 if (u) {
803 struct dentry *dentry;
804 dentry = unix_sk(u)->dentry;
805 if (dentry)
806 touch_atime(unix_sk(u)->mnt, dentry);
807 } else
808 goto fail;
809 }
810 return u;
811
812 put_fail:
813 path_put(&path);
814 fail:
815 *error = err;
816 return NULL;
817 }
818
819
820 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
821 {
822 struct sock *sk = sock->sk;
823 struct net *net = sock_net(sk);
824 struct unix_sock *u = unix_sk(sk);
825 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
826 char *sun_path = sunaddr->sun_path;
827 struct dentry *dentry = NULL;
828 struct path path;
829 int err;
830 unsigned hash;
831 struct unix_address *addr;
832 struct hlist_head *list;
833
834 err = -EINVAL;
835 if (sunaddr->sun_family != AF_UNIX)
836 goto out;
837
838 if (addr_len == sizeof(short)) {
839 err = unix_autobind(sock);
840 goto out;
841 }
842
843 err = unix_mkname(sunaddr, addr_len, &hash);
844 if (err < 0)
845 goto out;
846 addr_len = err;
847
848 mutex_lock(&u->readlock);
849
850 err = -EINVAL;
851 if (u->addr)
852 goto out_up;
853
854 err = -ENOMEM;
855 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
856 if (!addr)
857 goto out_up;
858
859 memcpy(addr->name, sunaddr, addr_len);
860 addr->len = addr_len;
861 addr->hash = hash ^ sk->sk_type;
862 atomic_set(&addr->refcnt, 1);
863
864 if (sun_path[0]) {
865 umode_t mode;
866 err = 0;
867 /*
868 * Get the parent directory, calculate the hash for the last
869 * component.
870 */
871 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
872 err = PTR_ERR(dentry);
873 if (IS_ERR(dentry))
874 goto out_mknod_parent;
875
876 /*
877 * All right, let's create it.
878 */
879 mode = S_IFSOCK |
880 (SOCK_INODE(sock)->i_mode & ~current_umask());
881 err = mnt_want_write(path.mnt);
882 if (err)
883 goto out_mknod_dput;
884 err = security_path_mknod(&path, dentry, mode, 0);
885 if (err)
886 goto out_mknod_drop_write;
887 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
888 out_mknod_drop_write:
889 mnt_drop_write(path.mnt);
890 if (err)
891 goto out_mknod_dput;
892 mutex_unlock(&path.dentry->d_inode->i_mutex);
893 dput(path.dentry);
894 path.dentry = dentry;
895
896 addr->hash = UNIX_HASH_SIZE;
897 }
898
899 spin_lock(&unix_table_lock);
900
901 if (!sun_path[0]) {
902 err = -EADDRINUSE;
903 if (__unix_find_socket_byname(net, sunaddr, addr_len,
904 sk->sk_type, hash)) {
905 unix_release_addr(addr);
906 goto out_unlock;
907 }
908
909 list = &unix_socket_table[addr->hash];
910 } else {
911 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
912 u->dentry = path.dentry;
913 u->mnt = path.mnt;
914 }
915
916 err = 0;
917 __unix_remove_socket(sk);
918 u->addr = addr;
919 __unix_insert_socket(list, sk);
920
921 out_unlock:
922 spin_unlock(&unix_table_lock);
923 out_up:
924 mutex_unlock(&u->readlock);
925 out:
926 return err;
927
928 out_mknod_dput:
929 dput(dentry);
930 mutex_unlock(&path.dentry->d_inode->i_mutex);
931 path_put(&path);
932 out_mknod_parent:
933 if (err == -EEXIST)
934 err = -EADDRINUSE;
935 unix_release_addr(addr);
936 goto out_up;
937 }
938
939 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
940 {
941 if (unlikely(sk1 == sk2) || !sk2) {
942 unix_state_lock(sk1);
943 return;
944 }
945 if (sk1 < sk2) {
946 unix_state_lock(sk1);
947 unix_state_lock_nested(sk2);
948 } else {
949 unix_state_lock(sk2);
950 unix_state_lock_nested(sk1);
951 }
952 }
953
954 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
955 {
956 if (unlikely(sk1 == sk2) || !sk2) {
957 unix_state_unlock(sk1);
958 return;
959 }
960 unix_state_unlock(sk1);
961 unix_state_unlock(sk2);
962 }
963
964 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
965 int alen, int flags)
966 {
967 struct sock *sk = sock->sk;
968 struct net *net = sock_net(sk);
969 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
970 struct sock *other;
971 unsigned hash;
972 int err;
973
974 if (addr->sa_family != AF_UNSPEC) {
975 err = unix_mkname(sunaddr, alen, &hash);
976 if (err < 0)
977 goto out;
978 alen = err;
979
980 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
981 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
982 goto out;
983
984 restart:
985 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
986 if (!other)
987 goto out;
988
989 unix_state_double_lock(sk, other);
990
991 /* Apparently VFS overslept socket death. Retry. */
992 if (sock_flag(other, SOCK_DEAD)) {
993 unix_state_double_unlock(sk, other);
994 sock_put(other);
995 goto restart;
996 }
997
998 err = -EPERM;
999 if (!unix_may_send(sk, other))
1000 goto out_unlock;
1001
1002 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1003 if (err)
1004 goto out_unlock;
1005
1006 } else {
1007 /*
1008 * 1003.1g breaking connected state with AF_UNSPEC
1009 */
1010 other = NULL;
1011 unix_state_double_lock(sk, other);
1012 }
1013
1014 /*
1015 * If it was connected, reconnect.
1016 */
1017 if (unix_peer(sk)) {
1018 struct sock *old_peer = unix_peer(sk);
1019 unix_peer(sk) = other;
1020 unix_state_double_unlock(sk, other);
1021
1022 if (other != old_peer)
1023 unix_dgram_disconnected(sk, old_peer);
1024 sock_put(old_peer);
1025 } else {
1026 unix_peer(sk) = other;
1027 unix_state_double_unlock(sk, other);
1028 }
1029 return 0;
1030
1031 out_unlock:
1032 unix_state_double_unlock(sk, other);
1033 sock_put(other);
1034 out:
1035 return err;
1036 }
1037
1038 static long unix_wait_for_peer(struct sock *other, long timeo)
1039 {
1040 struct unix_sock *u = unix_sk(other);
1041 int sched;
1042 DEFINE_WAIT(wait);
1043
1044 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1045
1046 sched = !sock_flag(other, SOCK_DEAD) &&
1047 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1048 unix_recvq_full(other);
1049
1050 unix_state_unlock(other);
1051
1052 if (sched)
1053 timeo = schedule_timeout(timeo);
1054
1055 finish_wait(&u->peer_wait, &wait);
1056 return timeo;
1057 }
1058
1059 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1060 int addr_len, int flags)
1061 {
1062 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1063 struct sock *sk = sock->sk;
1064 struct net *net = sock_net(sk);
1065 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1066 struct sock *newsk = NULL;
1067 struct sock *other = NULL;
1068 struct sk_buff *skb = NULL;
1069 unsigned hash;
1070 int st;
1071 int err;
1072 long timeo;
1073
1074 err = unix_mkname(sunaddr, addr_len, &hash);
1075 if (err < 0)
1076 goto out;
1077 addr_len = err;
1078
1079 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1080 (err = unix_autobind(sock)) != 0)
1081 goto out;
1082
1083 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1084
1085 /* First of all, allocate resources.
1086 If we did it after the state was locked,
1087 we would have to recheck everything again in any case.
1088 */
1089
1090 err = -ENOMEM;
1091
1092 /* create new sock for complete connection */
1093 newsk = unix_create1(sock_net(sk), NULL);
1094 if (newsk == NULL)
1095 goto out;
1096
1097 /* Allocate skb for sending to listening sock */
1098 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1099 if (skb == NULL)
1100 goto out;
1101
1102 restart:
1103 /* Find listening sock. */
1104 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1105 if (!other)
1106 goto out;
1107
1108 /* Latch state of peer */
1109 unix_state_lock(other);
1110
1111 /* Apparently VFS overslept socket death. Retry. */
1112 if (sock_flag(other, SOCK_DEAD)) {
1113 unix_state_unlock(other);
1114 sock_put(other);
1115 goto restart;
1116 }
1117
1118 err = -ECONNREFUSED;
1119 if (other->sk_state != TCP_LISTEN)
1120 goto out_unlock;
1121 if (other->sk_shutdown & RCV_SHUTDOWN)
1122 goto out_unlock;
1123
1124 if (unix_recvq_full(other)) {
1125 err = -EAGAIN;
1126 if (!timeo)
1127 goto out_unlock;
1128
1129 timeo = unix_wait_for_peer(other, timeo);
1130
1131 err = sock_intr_errno(timeo);
1132 if (signal_pending(current))
1133 goto out;
1134 sock_put(other);
1135 goto restart;
1136 }
1137
1138 /* Latch our state.
1139
1140 This is a tricky place. We need to grab our state lock and cannot
1141 drop the lock on the peer. It is dangerous because a deadlock is
1142 possible. The connect-to-self case and a simultaneous
1143 attempt to connect are eliminated by checking the socket
1144 state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
1145 checked this before attempting to grab the lock.
1146
1147 Well, and we have to recheck the state after the socket is locked.
1148 */
1149 st = sk->sk_state;
1150
1151 switch (st) {
1152 case TCP_CLOSE:
1153 /* This is ok... continue with connect */
1154 break;
1155 case TCP_ESTABLISHED:
1156 /* Socket is already connected */
1157 err = -EISCONN;
1158 goto out_unlock;
1159 default:
1160 err = -EINVAL;
1161 goto out_unlock;
1162 }
1163
1164 unix_state_lock_nested(sk);
1165
1166 if (sk->sk_state != st) {
1167 unix_state_unlock(sk);
1168 unix_state_unlock(other);
1169 sock_put(other);
1170 goto restart;
1171 }
1172
1173 err = security_unix_stream_connect(sk, other, newsk);
1174 if (err) {
1175 unix_state_unlock(sk);
1176 goto out_unlock;
1177 }
1178
1179 /* The way is open! Quickly set all the necessary fields... */
1180
1181 sock_hold(sk);
1182 unix_peer(newsk) = sk;
1183 newsk->sk_state = TCP_ESTABLISHED;
1184 newsk->sk_type = sk->sk_type;
1185 init_peercred(newsk);
1186 newu = unix_sk(newsk);
1187 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1188 otheru = unix_sk(other);
1189
1190 /* copy address information from the listening to the new sock */
1191 if (otheru->addr) {
1192 atomic_inc(&otheru->addr->refcnt);
1193 newu->addr = otheru->addr;
1194 }
1195 if (otheru->dentry) {
1196 newu->dentry = dget(otheru->dentry);
1197 newu->mnt = mntget(otheru->mnt);
1198 }
1199
1200 /* Set credentials */
1201 copy_peercred(sk, other);
1202
1203 sock->state = SS_CONNECTED;
1204 sk->sk_state = TCP_ESTABLISHED;
1205 sock_hold(newsk);
1206
1207 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1208 unix_peer(sk) = newsk;
1209
1210 unix_state_unlock(sk);
1211
1212 /* take ten and send info to the listening sock */
1213 spin_lock(&other->sk_receive_queue.lock);
1214 __skb_queue_tail(&other->sk_receive_queue, skb);
1215 spin_unlock(&other->sk_receive_queue.lock);
1216 unix_state_unlock(other);
1217 other->sk_data_ready(other, 0);
1218 sock_put(other);
1219 return 0;
1220
1221 out_unlock:
1222 if (other)
1223 unix_state_unlock(other);
1224
1225 out:
1226 kfree_skb(skb);
1227 if (newsk)
1228 unix_release_sock(newsk, 0);
1229 if (other)
1230 sock_put(other);
1231 return err;
1232 }
1233
1234 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1235 {
1236 struct sock *ska = socka->sk, *skb = sockb->sk;
1237
1238 /* Join our sockets back to back */
1239 sock_hold(ska);
1240 sock_hold(skb);
1241 unix_peer(ska) = skb;
1242 unix_peer(skb) = ska;
1243 init_peercred(ska);
1244 init_peercred(skb);
1245
1246 if (ska->sk_type != SOCK_DGRAM) {
1247 ska->sk_state = TCP_ESTABLISHED;
1248 skb->sk_state = TCP_ESTABLISHED;
1249 socka->state = SS_CONNECTED;
1250 sockb->state = SS_CONNECTED;
1251 }
1252 return 0;
1253 }
1254
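/*
 * Illustrative sketch (userspace): the back-to-back join above is
 * what backs socketpair(2):
 *
 *	int sv[2];
 *	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) == 0)
 *		write(sv[0], "ping", 4);	// readable on sv[1]
 */
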
1255 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1256 {
1257 struct sock *sk = sock->sk;
1258 struct sock *tsk;
1259 struct sk_buff *skb;
1260 int err;
1261
1262 err = -EOPNOTSUPP;
1263 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1264 goto out;
1265
1266 err = -EINVAL;
1267 if (sk->sk_state != TCP_LISTEN)
1268 goto out;
1269
1270 /* If socket state is TCP_LISTEN it cannot change (for now...),
1271 * so no locks are necessary.
1272 */
1273
1274 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1275 if (!skb) {
1276 /* This means receive shutdown. */
1277 if (err == 0)
1278 err = -EINVAL;
1279 goto out;
1280 }
1281
1282 tsk = skb->sk;
1283 skb_free_datagram(sk, skb);
1284 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1285
1286 /* attach accepted sock to socket */
1287 unix_state_lock(tsk);
1288 newsock->state = SS_CONNECTED;
1289 sock_graft(tsk, newsock);
1290 unix_state_unlock(tsk);
1291 return 0;
1292
1293 out:
1294 return err;
1295 }
1296
1297
1298 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1299 {
1300 struct sock *sk = sock->sk;
1301 struct unix_sock *u;
1302 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1303 int err = 0;
1304
1305 if (peer) {
1306 sk = unix_peer_get(sk);
1307
1308 err = -ENOTCONN;
1309 if (!sk)
1310 goto out;
1311 err = 0;
1312 } else {
1313 sock_hold(sk);
1314 }
1315
1316 u = unix_sk(sk);
1317 unix_state_lock(sk);
1318 if (!u->addr) {
1319 sunaddr->sun_family = AF_UNIX;
1320 sunaddr->sun_path[0] = 0;
1321 *uaddr_len = sizeof(short);
1322 } else {
1323 struct unix_address *addr = u->addr;
1324
1325 *uaddr_len = addr->len;
1326 memcpy(sunaddr, addr->name, *uaddr_len);
1327 }
1328 unix_state_unlock(sk);
1329 sock_put(sk);
1330 out:
1331 return err;
1332 }
1333
1334 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1335 {
1336 int i;
1337
1338 scm->fp = UNIXCB(skb).fp;
1339 UNIXCB(skb).fp = NULL;
1340
1341 for (i = scm->fp->count-1; i >= 0; i--)
1342 unix_notinflight(scm->fp->fp[i]);
1343 }
1344
1345 static void unix_destruct_scm(struct sk_buff *skb)
1346 {
1347 struct scm_cookie scm;
1348 memset(&scm, 0, sizeof(scm));
1349 scm.pid = UNIXCB(skb).pid;
1350 scm.cred = UNIXCB(skb).cred;
1351 if (UNIXCB(skb).fp)
1352 unix_detach_fds(&scm, skb);
1353
1354 /* Alas, it calls VFS */
1355 /* So fscking what? fput() had been SMP-safe since the last Summer */
1356 scm_destroy(&scm);
1357 sock_wfree(skb);
1358 }
1359
1360 #define MAX_RECURSION_LEVEL 4
1361
1362 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1363 {
1364 int i;
1365 unsigned char max_level = 0;
1366 int unix_sock_count = 0;
1367
1368 for (i = scm->fp->count - 1; i >= 0; i--) {
1369 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1370
1371 if (sk) {
1372 unix_sock_count++;
1373 max_level = max(max_level,
1374 unix_sk(sk)->recursion_level);
1375 }
1376 }
1377 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1378 return -ETOOMANYREFS;
1379
1380 /*
1381 * Need to duplicate file references for the sake of garbage
1382 * collection. Otherwise a socket in the fps might become a
1383 * candidate for GC while the skb is not yet queued.
1384 */
1385 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1386 if (!UNIXCB(skb).fp)
1387 return -ENOMEM;
1388
1389 if (unix_sock_count) {
1390 for (i = scm->fp->count - 1; i >= 0; i--)
1391 unix_inflight(scm->fp->fp[i]);
1392 }
1393 return max_level;
1394 }
1395
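/*
 * Illustrative sketch (userspace, names assumed): the fds that
 * unix_attach_fds() duplicates above originate from an SCM_RIGHTS
 * control message built roughly like this on the sending side:
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = (void *)"x", .iov_len = 1 };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
 *
 *	c->cmsg_level = SOL_SOCKET;
 *	c->cmsg_type = SCM_RIGHTS;
 *	c->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(c), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &mh, 0);
 */
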
1396 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1397 {
1398 int err = 0;
1399
1400 UNIXCB(skb).pid = get_pid(scm->pid);
1401 if (scm->cred)
1402 UNIXCB(skb).cred = get_cred(scm->cred);
1403 UNIXCB(skb).fp = NULL;
1404 if (scm->fp && send_fds)
1405 err = unix_attach_fds(scm, skb);
1406
1407 skb->destructor = unix_destruct_scm;
1408 return err;
1409 }
1410
1411 /*
1412 * Some apps rely on write() giving SCM_CREDENTIALS.
1413 * We include credentials if the source or destination socket
1414 * asserted SOCK_PASSCRED.
1415 */
1416 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1417 const struct sock *other)
1418 {
1419 if (UNIXCB(skb).cred)
1420 return;
1421 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1422 !other->sk_socket ||
1423 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1424 UNIXCB(skb).pid = get_pid(task_tgid(current));
1425 UNIXCB(skb).cred = get_current_cred();
1426 }
1427 }
1428
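/*
 * Illustrative sketch (userspace): a receiver that asserted
 * SOCK_PASSCRED obtains the credentials attached above as an
 * SCM_CREDENTIALS control message:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// recvmsg() then yields a cmsg with cmsg_level == SOL_SOCKET,
 *	// cmsg_type == SCM_CREDENTIALS and a struct ucred payload
 *	// carrying pid, uid and gid.
 */
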
1429 /*
1430 * Send AF_UNIX data.
1431 */
1432
1433 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1434 struct msghdr *msg, size_t len)
1435 {
1436 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1437 struct sock *sk = sock->sk;
1438 struct net *net = sock_net(sk);
1439 struct unix_sock *u = unix_sk(sk);
1440 struct sockaddr_un *sunaddr = msg->msg_name;
1441 struct sock *other = NULL;
1442 int namelen = 0; /* fake GCC */
1443 int err;
1444 unsigned hash;
1445 struct sk_buff *skb;
1446 long timeo;
1447 struct scm_cookie tmp_scm;
1448 int max_level;
1449
1450 if (NULL == siocb->scm)
1451 siocb->scm = &tmp_scm;
1452 wait_for_unix_gc();
1453 err = scm_send(sock, msg, siocb->scm);
1454 if (err < 0)
1455 return err;
1456
1457 err = -EOPNOTSUPP;
1458 if (msg->msg_flags&MSG_OOB)
1459 goto out;
1460
1461 if (msg->msg_namelen) {
1462 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1463 if (err < 0)
1464 goto out;
1465 namelen = err;
1466 } else {
1467 sunaddr = NULL;
1468 err = -ENOTCONN;
1469 other = unix_peer_get(sk);
1470 if (!other)
1471 goto out;
1472 }
1473
1474 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1475 && (err = unix_autobind(sock)) != 0)
1476 goto out;
1477
1478 err = -EMSGSIZE;
1479 if (len > sk->sk_sndbuf - 32)
1480 goto out;
1481
1482 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1483 if (skb == NULL)
1484 goto out;
1485
1486 err = unix_scm_to_skb(siocb->scm, skb, true);
1487 if (err < 0)
1488 goto out_free;
1489 max_level = err + 1;
1490 unix_get_secdata(siocb->scm, skb);
1491
1492 skb_reset_transport_header(skb);
1493 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1494 if (err)
1495 goto out_free;
1496
1497 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1498
1499 restart:
1500 if (!other) {
1501 err = -ECONNRESET;
1502 if (sunaddr == NULL)
1503 goto out_free;
1504
1505 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1506 hash, &err);
1507 if (other == NULL)
1508 goto out_free;
1509 }
1510
1511 if (sk_filter(other, skb) < 0) {
1512 /* Toss the packet but do not return any error to the sender */
1513 err = len;
1514 goto out_free;
1515 }
1516
1517 unix_state_lock(other);
1518 err = -EPERM;
1519 if (!unix_may_send(sk, other))
1520 goto out_unlock;
1521
1522 if (sock_flag(other, SOCK_DEAD)) {
1523 /*
1524 * Check with 1003.1g - what should
1525 * a datagram error be here?
1526 */
1527 unix_state_unlock(other);
1528 sock_put(other);
1529
1530 err = 0;
1531 unix_state_lock(sk);
1532 if (unix_peer(sk) == other) {
1533 unix_peer(sk) = NULL;
1534 unix_state_unlock(sk);
1535
1536 unix_dgram_disconnected(sk, other);
1537 sock_put(other);
1538 err = -ECONNREFUSED;
1539 } else {
1540 unix_state_unlock(sk);
1541 }
1542
1543 other = NULL;
1544 if (err)
1545 goto out_free;
1546 goto restart;
1547 }
1548
1549 err = -EPIPE;
1550 if (other->sk_shutdown & RCV_SHUTDOWN)
1551 goto out_unlock;
1552
1553 if (sk->sk_type != SOCK_SEQPACKET) {
1554 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1555 if (err)
1556 goto out_unlock;
1557 }
1558
1559 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1560 if (!timeo) {
1561 err = -EAGAIN;
1562 goto out_unlock;
1563 }
1564
1565 timeo = unix_wait_for_peer(other, timeo);
1566
1567 err = sock_intr_errno(timeo);
1568 if (signal_pending(current))
1569 goto out_free;
1570
1571 goto restart;
1572 }
1573
1574 if (sock_flag(other, SOCK_RCVTSTAMP))
1575 __net_timestamp(skb);
1576 maybe_add_creds(skb, sock, other);
1577 skb_queue_tail(&other->sk_receive_queue, skb);
1578 if (max_level > unix_sk(other)->recursion_level)
1579 unix_sk(other)->recursion_level = max_level;
1580 unix_state_unlock(other);
1581 other->sk_data_ready(other, len);
1582 sock_put(other);
1583 scm_destroy(siocb->scm);
1584 return len;
1585
1586 out_unlock:
1587 unix_state_unlock(other);
1588 out_free:
1589 kfree_skb(skb);
1590 out:
1591 if (other)
1592 sock_put(other);
1593 scm_destroy(siocb->scm);
1594 return err;
1595 }
1596
1597
1598 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1599 struct msghdr *msg, size_t len)
1600 {
1601 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1602 struct sock *sk = sock->sk;
1603 struct sock *other = NULL;
1604 int err, size;
1605 struct sk_buff *skb;
1606 int sent = 0;
1607 struct scm_cookie tmp_scm;
1608 bool fds_sent = false;
1609 int max_level;
1610
1611 if (NULL == siocb->scm)
1612 siocb->scm = &tmp_scm;
1613 wait_for_unix_gc();
1614 err = scm_send(sock, msg, siocb->scm);
1615 if (err < 0)
1616 return err;
1617
1618 err = -EOPNOTSUPP;
1619 if (msg->msg_flags&MSG_OOB)
1620 goto out_err;
1621
1622 if (msg->msg_namelen) {
1623 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1624 goto out_err;
1625 } else {
1626 err = -ENOTCONN;
1627 other = unix_peer(sk);
1628 if (!other)
1629 goto out_err;
1630 }
1631
1632 if (sk->sk_shutdown & SEND_SHUTDOWN)
1633 goto pipe_err;
1634
1635 while (sent < len) {
1636 /*
1637 * Optimisation for the fact that under 0.01% of X
1638 * messages typically need breaking up.
1639 */
1640
1641 size = len-sent;
1642
1643 /* Keep two messages in the pipe so it schedules better */
1644 if (size > ((sk->sk_sndbuf >> 1) - 64))
1645 size = (sk->sk_sndbuf >> 1) - 64;
1646
1647 if (size > SKB_MAX_ALLOC)
1648 size = SKB_MAX_ALLOC;
1649
1650 /*
1651 * Grab a buffer
1652 */
1653
1654 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1655 &err);
1656
1657 if (skb == NULL)
1658 goto out_err;
1659
1660 /*
1661 * If you pass two values to sock_alloc_send_skb()
1662 * it tries to grab the large buffer with GFP_NOFS
1663 * (which can fail easily), and if that fails it grabs the
1664 * fallback-size buffer, which is under a page and will
1665 * succeed. [Alan]
1666 */
1667 size = min_t(int, size, skb_tailroom(skb));
1668
1669
1670 /* Only send the fds in the first buffer */
1671 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1672 if (err < 0) {
1673 kfree_skb(skb);
1674 goto out_err;
1675 }
1676 max_level = err + 1;
1677 fds_sent = true;
1678
1679 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1680 if (err) {
1681 kfree_skb(skb);
1682 goto out_err;
1683 }
1684
1685 unix_state_lock(other);
1686
1687 if (sock_flag(other, SOCK_DEAD) ||
1688 (other->sk_shutdown & RCV_SHUTDOWN))
1689 goto pipe_err_free;
1690
1691 maybe_add_creds(skb, sock, other);
1692 skb_queue_tail(&other->sk_receive_queue, skb);
1693 if (max_level > unix_sk(other)->recursion_level)
1694 unix_sk(other)->recursion_level = max_level;
1695 unix_state_unlock(other);
1696 other->sk_data_ready(other, size);
1697 sent += size;
1698 }
1699
1700 scm_destroy(siocb->scm);
1701 siocb->scm = NULL;
1702
1703 return sent;
1704
1705 pipe_err_free:
1706 unix_state_unlock(other);
1707 kfree_skb(skb);
1708 pipe_err:
1709 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1710 send_sig(SIGPIPE, current, 0);
1711 err = -EPIPE;
1712 out_err:
1713 scm_destroy(siocb->scm);
1714 siocb->scm = NULL;
1715 return sent ? : err;
1716 }
1717
1718 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1719 struct msghdr *msg, size_t len)
1720 {
1721 int err;
1722 struct sock *sk = sock->sk;
1723
1724 err = sock_error(sk);
1725 if (err)
1726 return err;
1727
1728 if (sk->sk_state != TCP_ESTABLISHED)
1729 return -ENOTCONN;
1730
1731 if (msg->msg_namelen)
1732 msg->msg_namelen = 0;
1733
1734 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1735 }
1736
1737 static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1738 struct msghdr *msg, size_t size,
1739 int flags)
1740 {
1741 struct sock *sk = sock->sk;
1742
1743 if (sk->sk_state != TCP_ESTABLISHED)
1744 return -ENOTCONN;
1745
1746 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1747 }
1748
1749 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1750 {
1751 struct unix_sock *u = unix_sk(sk);
1752
1753 msg->msg_namelen = 0;
1754 if (u->addr) {
1755 msg->msg_namelen = u->addr->len;
1756 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1757 }
1758 }
1759
1760 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1761 struct msghdr *msg, size_t size,
1762 int flags)
1763 {
1764 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1765 struct scm_cookie tmp_scm;
1766 struct sock *sk = sock->sk;
1767 struct unix_sock *u = unix_sk(sk);
1768 int noblock = flags & MSG_DONTWAIT;
1769 struct sk_buff *skb;
1770 int err;
1771 int peeked, skip;
1772
1773 err = -EOPNOTSUPP;
1774 if (flags&MSG_OOB)
1775 goto out;
1776
1777 msg->msg_namelen = 0;
1778
1779 err = mutex_lock_interruptible(&u->readlock);
1780 if (err) {
1781 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1782 goto out;
1783 }
1784
1785 skip = sk_peek_offset(sk, flags);
1786
1787 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
1788 if (!skb) {
1789 unix_state_lock(sk);
1790 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1791 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1792 (sk->sk_shutdown & RCV_SHUTDOWN))
1793 err = 0;
1794 unix_state_unlock(sk);
1795 goto out_unlock;
1796 }
1797
1798 wake_up_interruptible_sync_poll(&u->peer_wait,
1799 POLLOUT | POLLWRNORM | POLLWRBAND);
1800
1801 if (msg->msg_name)
1802 unix_copy_addr(msg, skb->sk);
1803
1804 if (size > skb->len - skip)
1805 size = skb->len - skip;
1806 else if (size < skb->len - skip)
1807 msg->msg_flags |= MSG_TRUNC;
1808
1809 err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size);
1810 if (err)
1811 goto out_free;
1812
1813 if (sock_flag(sk, SOCK_RCVTSTAMP))
1814 __sock_recv_timestamp(msg, sk, skb);
1815
1816 if (!siocb->scm) {
1817 siocb->scm = &tmp_scm;
1818 memset(&tmp_scm, 0, sizeof(tmp_scm));
1819 }
1820 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1821 unix_set_secdata(siocb->scm, skb);
1822
1823 if (!(flags & MSG_PEEK)) {
1824 if (UNIXCB(skb).fp)
1825 unix_detach_fds(siocb->scm, skb);
1826
1827 sk_peek_offset_bwd(sk, skb->len);
1828 } else {
1829 /* It is questionable: on PEEK we could:
1830 - not return fds - good, but too simple 8)
1831 - return fds, and not return them on read (the old strategy,
1832 apparently wrong)
1833 - clone fds (I chose this for now, it is the most universal
1834 solution)
1835
1836 POSIX 1003.1g does not actually define this clearly
1837 at all. POSIX 1003.1g doesn't define a lot of things
1838 clearly, however!
1839
1840 */
1841
1842 sk_peek_offset_fwd(sk, size);
1843
1844 if (UNIXCB(skb).fp)
1845 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1846 }
1847 err = size;
1848
1849 scm_recv(sock, msg, siocb->scm, flags);
1850
1851 out_free:
1852 skb_free_datagram(sk, skb);
1853 out_unlock:
1854 mutex_unlock(&u->readlock);
1855 out:
1856 return err;
1857 }
1858
1859 /*
1860 * Sleep until data has arrived. But check for races.
1861 */
1862
1863 static long unix_stream_data_wait(struct sock *sk, long timeo)
1864 {
1865 DEFINE_WAIT(wait);
1866
1867 unix_state_lock(sk);
1868
1869 for (;;) {
1870 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1871
1872 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1873 sk->sk_err ||
1874 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1875 signal_pending(current) ||
1876 !timeo)
1877 break;
1878
1879 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1880 unix_state_unlock(sk);
1881 timeo = schedule_timeout(timeo);
1882 unix_state_lock(sk);
1883 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1884 }
1885
1886 finish_wait(sk_sleep(sk), &wait);
1887 unix_state_unlock(sk);
1888 return timeo;
1889 }
1890
1891
1892
1893 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1894 struct msghdr *msg, size_t size,
1895 int flags)
1896 {
1897 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1898 struct scm_cookie tmp_scm;
1899 struct sock *sk = sock->sk;
1900 struct unix_sock *u = unix_sk(sk);
1901 struct sockaddr_un *sunaddr = msg->msg_name;
1902 int copied = 0;
1903 int check_creds = 0;
1904 int target;
1905 int err = 0;
1906 long timeo;
1907
1908 err = -EINVAL;
1909 if (sk->sk_state != TCP_ESTABLISHED)
1910 goto out;
1911
1912 err = -EOPNOTSUPP;
1913 if (flags&MSG_OOB)
1914 goto out;
1915
1916 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1917 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1918
1919 msg->msg_namelen = 0;
1920
1921 /* Lock the socket to prevent queue disordering
1922 * while we sleep copying data to the message
1923 */
1924
1925 if (!siocb->scm) {
1926 siocb->scm = &tmp_scm;
1927 memset(&tmp_scm, 0, sizeof(tmp_scm));
1928 }
1929
1930 err = mutex_lock_interruptible(&u->readlock);
1931 if (err) {
1932 err = sock_intr_errno(timeo);
1933 goto out;
1934 }
1935
1936 do {
1937 int chunk;
1938 struct sk_buff *skb;
1939
1940 unix_state_lock(sk);
1941 skb = skb_peek(&sk->sk_receive_queue);
1942 if (skb == NULL) {
1943 unix_sk(sk)->recursion_level = 0;
1944 if (copied >= target)
1945 goto unlock;
1946
1947 /*
1948 * POSIX 1003.1g mandates this order.
1949 */
1950
1951 err = sock_error(sk);
1952 if (err)
1953 goto unlock;
1954 if (sk->sk_shutdown & RCV_SHUTDOWN)
1955 goto unlock;
1956
1957 unix_state_unlock(sk);
1958 err = -EAGAIN;
1959 if (!timeo)
1960 break;
1961 mutex_unlock(&u->readlock);
1962
1963 timeo = unix_stream_data_wait(sk, timeo);
1964
1965 if (signal_pending(current)
1966 || mutex_lock_interruptible(&u->readlock)) {
1967 err = sock_intr_errno(timeo);
1968 goto out;
1969 }
1970
1971 continue;
1972 unlock:
1973 unix_state_unlock(sk);
1974 break;
1975 }
1976 unix_state_unlock(sk);
1977
1978 if (check_creds) {
1979 /* Never glue messages from different writers */
1980 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1981 (UNIXCB(skb).cred != siocb->scm->cred))
1982 break;
1983 } else {
1984 /* Copy credentials */
1985 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1986 check_creds = 1;
1987 }
1988
1989 /* Copy address just once */
1990 if (sunaddr) {
1991 unix_copy_addr(msg, skb->sk);
1992 sunaddr = NULL;
1993 }
1994
1995 chunk = min_t(unsigned int, skb->len, size);
1996 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1997 if (copied == 0)
1998 copied = -EFAULT;
1999 break;
2000 }
2001 copied += chunk;
2002 size -= chunk;
2003
2004 /* Mark read part of skb as used */
2005 if (!(flags & MSG_PEEK)) {
2006 skb_pull(skb, chunk);
2007
2008 if (UNIXCB(skb).fp)
2009 unix_detach_fds(siocb->scm, skb);
2010
2011 if (skb->len)
2012 break;
2013
2014 skb_unlink(skb, &sk->sk_receive_queue);
2015 consume_skb(skb);
2016
2017 if (siocb->scm->fp)
2018 break;
2019 } else {
2020 /* It is questionable, see note in unix_dgram_recvmsg.
2021 */
2022 if (UNIXCB(skb).fp)
2023 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2024
2025 break;
2026 }
2027 } while (size);
2028
2029 mutex_unlock(&u->readlock);
2030 scm_recv(sock, msg, siocb->scm, flags);
2031 out:
2032 return copied ? : err;
2033 }
2034
2035 static int unix_shutdown(struct socket *sock, int mode)
2036 {
2037 struct sock *sk = sock->sk;
2038 struct sock *other;
2039
2040 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
2041
2042 if (!mode)
2043 return 0;
2044
2045 unix_state_lock(sk);
2046 sk->sk_shutdown |= mode;
2047 other = unix_peer(sk);
2048 if (other)
2049 sock_hold(other);
2050 unix_state_unlock(sk);
2051 sk->sk_state_change(sk);
2052
2053 if (other &&
2054 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2055
2056 int peer_mode = 0;
2057
2058 if (mode&RCV_SHUTDOWN)
2059 peer_mode |= SEND_SHUTDOWN;
2060 if (mode&SEND_SHUTDOWN)
2061 peer_mode |= RCV_SHUTDOWN;
2062 unix_state_lock(other);
2063 other->sk_shutdown |= peer_mode;
2064 unix_state_unlock(other);
2065 other->sk_state_change(other);
2066 if (peer_mode == SHUTDOWN_MASK)
2067 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2068 else if (peer_mode & RCV_SHUTDOWN)
2069 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2070 }
2071 if (other)
2072 sock_put(other);
2073
2074 return 0;
2075 }
2076
2077 long unix_inq_len(struct sock *sk)
2078 {
2079 struct sk_buff *skb;
2080 long amount = 0;
2081
2082 if (sk->sk_state == TCP_LISTEN)
2083 return -EINVAL;
2084
2085 spin_lock(&sk->sk_receive_queue.lock);
2086 if (sk->sk_type == SOCK_STREAM ||
2087 sk->sk_type == SOCK_SEQPACKET) {
2088 skb_queue_walk(&sk->sk_receive_queue, skb)
2089 amount += skb->len;
2090 } else {
2091 skb = skb_peek(&sk->sk_receive_queue);
2092 if (skb)
2093 amount = skb->len;
2094 }
2095 spin_unlock(&sk->sk_receive_queue.lock);
2096
2097 return amount;
2098 }
2099 EXPORT_SYMBOL_GPL(unix_inq_len);
2100
2101 long unix_outq_len(struct sock *sk)
2102 {
2103 return sk_wmem_alloc_get(sk);
2104 }
2105 EXPORT_SYMBOL_GPL(unix_outq_len);
2106
2107 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2108 {
2109 struct sock *sk = sock->sk;
2110 long amount = 0;
2111 int err;
2112
2113 switch (cmd) {
2114 case SIOCOUTQ:
2115 amount = unix_outq_len(sk);
2116 err = put_user(amount, (int __user *)arg);
2117 break;
2118 case SIOCINQ:
2119 amount = unix_inq_len(sk);
2120 if (amount < 0)
2121 err = amount;
2122 else
2123 err = put_user(amount, (int __user *)arg);
2124 break;
2125 default:
2126 err = -ENOIOCTLCMD;
2127 break;
2128 }
2129 return err;
2130 }
2131
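/*
 * Illustrative sketch (userspace): the queue lengths computed by
 * unix_inq_len()/unix_outq_len() above are exposed via ioctl(2):
 *
 *	int pending;
 *	ioctl(fd, SIOCINQ, &pending);	// unread bytes in receive queue
 *	ioctl(fd, SIOCOUTQ, &pending);	// bytes still in the send queue
 */
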
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;

	/*
	 * We set writable also when the other side has shut down the
	 * connection.  This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

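/*
 * Datagram variant of poll.  On top of the local checks it implements
 * back-pressure: a connected sender is not writable while the peer's
 * receive queue is full, so it also sleeps on the peer's peer_wait
 * queue to be woken when room becomes available.
 */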
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based sockets need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
		return mask;

	writable = unix_writable(sk);
	other = unix_peer_get(sk);
	if (other) {
		if (unix_peer(other) != sk) {
			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
			if (unix_recvq_full(other))
				writable = 0;
		}
		sock_put(other);
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

#ifdef CONFIG_PROC_FS
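/*
 * /proc/net/unix is produced by walking the global socket hash table
 * under unix_table_lock; the helpers below step through all chains,
 * skipping sockets that belong to other network namespaces.
 */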
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);

	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s;
	     s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;

	++*pos;
	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

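/*
 * Emit one line per socket.  Illustrative output for a listening socket
 * (the kernel address is printed via %pK and may be masked depending on
 * kptr_restrict; abstract names are shown with a leading '@'):
 *
 * Num       RefCount Protocol Flags    Type St Inode Path
 * ffff8800b8a3b000: 00000002 00000000 00010000 0001 01 7852 /run/demo.sock
 */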
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next  = unix_seq_next,
	.stop  = unix_seq_stop,
	.show  = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = unix_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

#endif

static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner  = THIS_MODULE,
};

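/*
 * Per-namespace setup: register the sysctl table and, with procfs
 * enabled, create /proc/net/unix.  The default limit of 10 queued
 * datagrams per receiver can be raised via net.unix.max_dgram_qlen.
 */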
static int __net_init unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void __net_exit unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

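/*
 * Module init: the BUILD_BUG_ON guards the assumption that the per-skb
 * control block (skb->cb) is large enough to hold struct unix_skb_parms,
 * then the protocol, the PF_UNIX family and the per-namespace ops are
 * registered.
 */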
static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket.  But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);