1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and gives the blksize as the high water
59 * mark and a fake inode identifier (nor the BSD first-socket-fstat-twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
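/*
 * Editorial note - an illustrative userspace sketch, not kernel code:
 * binding a socket in the abstract namespace described above. The name
 * starts with a zero byte and is NOT NUL terminated; the address length
 * passed to bind() selects exactly how many name bytes count.
 *
 *	#include <stddef.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/un.h>
 *
 *	static int bind_abstract(int fd)
 *	{
 *		struct sockaddr_un sun;
 *
 *		memset(&sun, 0, sizeof(sun));
 *		sun.sun_family = AF_UNIX;
 *		// sun_path[0] == '\0' selects the abstract namespace
 *		memcpy(sun.sun_path + 1, "demo", 4);
 *		return bind(fd, (struct sockaddr *)&sun,
 *			    offsetof(struct sockaddr_un, sun_path) + 1 + 4);
 *	}
 */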
82
83 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
84
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
95 #include <linux/un.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
112 #include <net/scm.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119 #include <linux/freezer.h>
120
121 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
122 EXPORT_SYMBOL_GPL(unix_socket_table);
123 DEFINE_SPINLOCK(unix_table_lock);
124 EXPORT_SYMBOL_GPL(unix_table_lock);
125 static atomic_long_t unix_nr_socks;
126
127
128 static struct hlist_head *unix_sockets_unbound(void *addr)
129 {
130 unsigned long hash = (unsigned long)addr;
131
132 hash ^= hash >> 16;
133 hash ^= hash >> 8;
134 hash %= UNIX_HASH_SIZE;
135 return &unix_socket_table[UNIX_HASH_SIZE + hash];
136 }
137
138 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
139
140 #ifdef CONFIG_SECURITY_NETWORK
141 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
142 {
143 UNIXCB(skb).secid = scm->secid;
144 }
145
146 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
147 {
148 scm->secid = UNIXCB(skb).secid;
149 }
150
151 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
152 {
153 return (scm->secid == UNIXCB(skb).secid);
154 }
155 #else
156 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
157 { }
158
159 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
160 { }
161
162 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
163 {
164 return true;
165 }
166 #endif /* CONFIG_SECURITY_NETWORK */
167
168 /*
169 * SMP locking strategy:
170 * hash table is protected with spinlock unix_table_lock
171 * each socket state is protected by separate spin lock.
172 */
173
174 static inline unsigned int unix_hash_fold(__wsum n)
175 {
176 unsigned int hash = (__force unsigned int)csum_fold(n);
177
178 hash ^= hash>>8;
179 return hash&(UNIX_HASH_SIZE-1);
180 }
181
182 #define unix_peer(sk) (unix_sk(sk)->peer)
183
184 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
185 {
186 return unix_peer(osk) == sk;
187 }
188
189 static inline int unix_may_send(struct sock *sk, struct sock *osk)
190 {
191 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
192 }
193
194 static inline int unix_recvq_full(struct sock const *sk)
195 {
196 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
197 }
198
199 struct sock *unix_peer_get(struct sock *s)
200 {
201 struct sock *peer;
202
203 unix_state_lock(s);
204 peer = unix_peer(s);
205 if (peer)
206 sock_hold(peer);
207 unix_state_unlock(s);
208 return peer;
209 }
210 EXPORT_SYMBOL_GPL(unix_peer_get);
211
212 static inline void unix_release_addr(struct unix_address *addr)
213 {
214 if (atomic_dec_and_test(&addr->refcnt))
215 kfree(addr);
216 }
217
218 /*
219 * Check unix socket name:
220 * - should not be zero length.
221 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
222 * - if it starts with a zero byte, it is an abstract name.
223 */
224
225 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
226 {
227 if (len <= sizeof(short) || len > sizeof(*sunaddr))
228 return -EINVAL;
229 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
230 return -EINVAL;
231 if (sunaddr->sun_path[0]) {
232 /*
233 * This may look like an off by one error but it is a bit more
234 * subtle. 108 is the longest valid AF_UNIX path for a binding.
235 * sun_path[108] doesn't as such exist. However in kernel space
236 * we are guaranteed that it is a valid memory location in our
237 * kernel address buffer.
238 */
239 ((char *)sunaddr)[len] = 0;
240 len = strlen(sunaddr->sun_path)+1+sizeof(short);
241 return len;
242 }
243
244 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
245 return len;
246 }
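/*
 * Editorial note - a minimal userspace sketch of the filesystem name
 * form that unix_mkname() accepts. The length computed here matches the
 * kernel's strlen(sun_path) + 1 + sizeof(short). "fd" is assumed to be
 * an AF_UNIX socket created by the caller.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *
 *	strncpy(sun.sun_path, "/tmp/demo.sock", sizeof(sun.sun_path) - 1);
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) +
 *	     strlen(sun.sun_path) + 1);
 */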
247
248 static void __unix_remove_socket(struct sock *sk)
249 {
250 sk_del_node_init(sk);
251 }
252
253 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
254 {
255 WARN_ON(!sk_unhashed(sk));
256 sk_add_node(sk, list);
257 }
258
259 static inline void unix_remove_socket(struct sock *sk)
260 {
261 spin_lock(&unix_table_lock);
262 __unix_remove_socket(sk);
263 spin_unlock(&unix_table_lock);
264 }
265
266 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
267 {
268 spin_lock(&unix_table_lock);
269 __unix_insert_socket(list, sk);
270 spin_unlock(&unix_table_lock);
271 }
272
273 static struct sock *__unix_find_socket_byname(struct net *net,
274 struct sockaddr_un *sunname,
275 int len, int type, unsigned int hash)
276 {
277 struct sock *s;
278
279 sk_for_each(s, &unix_socket_table[hash ^ type]) {
280 struct unix_sock *u = unix_sk(s);
281
282 if (!net_eq(sock_net(s), net))
283 continue;
284
285 if (u->addr->len == len &&
286 !memcmp(u->addr->name, sunname, len))
287 goto found;
288 }
289 s = NULL;
290 found:
291 return s;
292 }
293
294 static inline struct sock *unix_find_socket_byname(struct net *net,
295 struct sockaddr_un *sunname,
296 int len, int type,
297 unsigned int hash)
298 {
299 struct sock *s;
300
301 spin_lock(&unix_table_lock);
302 s = __unix_find_socket_byname(net, sunname, len, type, hash);
303 if (s)
304 sock_hold(s);
305 spin_unlock(&unix_table_lock);
306 return s;
307 }
308
309 static struct sock *unix_find_socket_byinode(struct inode *i)
310 {
311 struct sock *s;
312
313 spin_lock(&unix_table_lock);
314 sk_for_each(s,
315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
316 struct dentry *dentry = unix_sk(s)->path.dentry;
317
318 if (dentry && d_backing_inode(dentry) == i) {
319 sock_hold(s);
320 goto found;
321 }
322 }
323 s = NULL;
324 found:
325 spin_unlock(&unix_table_lock);
326 return s;
327 }
328
329 /* Support code for asymmetrically connected dgram sockets
330 *
331 * If a datagram socket is connected to a socket not itself connected
332 * to the first socket (eg, /dev/log), clients may only enqueue more
333 * messages if the present receive queue of the server socket is not
334 * "too large". This means there's a second writeability condition
335 * poll and sendmsg need to test. The dgram recv code will do a wake
336 * up on the peer_wait wait queue of a socket upon reception of a
337 * datagram which needs to be propagated to sleeping would-be writers
338 * since these might not have sent anything so far. This can't be
339 * accomplished via poll_wait because the lifetime of the server
340 * socket might be less than that of its clients if these break their
341 * association with it or if the server socket is closed while clients
342 * are still connected to it and there's no way to inform "a polling
343 * implementation" that it should let go of a certain wait queue
344 *
345 * In order to propagate a wake up, a wait_queue_t of the client
346 * socket is enqueued on the peer_wait queue of the server socket
347 * whose wake function does a wake_up on the ordinary client socket
348 * wait queue. This connection is established whenever a write (or
349 * poll for write) hits the flow control condition and is broken when the
350 * association to the server socket is dissolved or after a wake up
351 * was relayed.
352 */
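/*
 * Editorial note - an illustrative userspace sketch (not kernel code) of
 * the behaviour the relay below implements: a connected datagram sender
 * blocked on a full receiver queue is woken once the receiver drains it,
 * even though the sender itself never received anything. "client_fd",
 * "buf" and "len" are assumed to be set up by the caller.
 *
 *	struct pollfd pfd = { .fd = client_fd, .events = POLLOUT };
 *
 *	for (;;) {
 *		if (send(client_fd, buf, len, MSG_DONTWAIT) >= 0)
 *			break;
 *		if (errno != EAGAIN)
 *			break;
 *		// receiver queue full: the peer_wait relay wakes us
 *		poll(&pfd, 1, -1);
 *	}
 */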
353
354 static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
355 void *key)
356 {
357 struct unix_sock *u;
358 wait_queue_head_t *u_sleep;
359
360 u = container_of(q, struct unix_sock, peer_wake);
361
362 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
363 q);
364 u->peer_wake.private = NULL;
365
366 /* relaying can only happen while the wq still exists */
367 u_sleep = sk_sleep(&u->sk);
368 if (u_sleep)
369 wake_up_interruptible_poll(u_sleep, key);
370
371 return 0;
372 }
373
374 static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
375 {
376 struct unix_sock *u, *u_other;
377 int rc;
378
379 u = unix_sk(sk);
380 u_other = unix_sk(other);
381 rc = 0;
382 spin_lock(&u_other->peer_wait.lock);
383
384 if (!u->peer_wake.private) {
385 u->peer_wake.private = other;
386 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
387
388 rc = 1;
389 }
390
391 spin_unlock(&u_other->peer_wait.lock);
392 return rc;
393 }
394
395 static void unix_dgram_peer_wake_disconnect(struct sock *sk,
396 struct sock *other)
397 {
398 struct unix_sock *u, *u_other;
399
400 u = unix_sk(sk);
401 u_other = unix_sk(other);
402 spin_lock(&u_other->peer_wait.lock);
403
404 if (u->peer_wake.private == other) {
405 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
406 u->peer_wake.private = NULL;
407 }
408
409 spin_unlock(&u_other->peer_wait.lock);
410 }
411
412 static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
413 struct sock *other)
414 {
415 unix_dgram_peer_wake_disconnect(sk, other);
416 wake_up_interruptible_poll(sk_sleep(sk),
417 POLLOUT |
418 POLLWRNORM |
419 POLLWRBAND);
420 }
421
422 /* preconditions:
423 * - unix_peer(sk) == other
424 * - association is stable
425 */
426 static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
427 {
428 int connected;
429
430 connected = unix_dgram_peer_wake_connect(sk, other);
431
432 if (unix_recvq_full(other))
433 return 1;
434
435 if (connected)
436 unix_dgram_peer_wake_disconnect(sk, other);
437
438 return 0;
439 }
440
441 static int unix_writable(const struct sock *sk)
442 {
443 return sk->sk_state != TCP_LISTEN &&
444 (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
445 }
446
447 static void unix_write_space(struct sock *sk)
448 {
449 struct socket_wq *wq;
450
451 rcu_read_lock();
452 if (unix_writable(sk)) {
453 wq = rcu_dereference(sk->sk_wq);
454 if (wq_has_sleeper(wq))
455 wake_up_interruptible_sync_poll(&wq->wait,
456 POLLOUT | POLLWRNORM | POLLWRBAND);
457 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
458 }
459 rcu_read_unlock();
460 }
461
462 /* When dgram socket disconnects (or changes its peer), we clear its receive
463 * queue of packets that arrived from the previous peer. First, this allows
464 * us to do flow control based only on wmem_alloc; second, a sk connected to
465 * a peer may receive messages only from that peer. */
466 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
467 {
468 if (!skb_queue_empty(&sk->sk_receive_queue)) {
469 skb_queue_purge(&sk->sk_receive_queue);
470 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
471
472 /* If one link of a bidirectional dgram pipe is disconnected,
473 * we signal an error. Messages are lost. Do not do this
474 * when the peer was not connected to us.
475 */
476 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
477 other->sk_err = ECONNRESET;
478 other->sk_error_report(other);
479 }
480 }
481 }
482
483 static void unix_sock_destructor(struct sock *sk)
484 {
485 struct unix_sock *u = unix_sk(sk);
486
487 skb_queue_purge(&sk->sk_receive_queue);
488
489 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
490 WARN_ON(!sk_unhashed(sk));
491 WARN_ON(sk->sk_socket);
492 if (!sock_flag(sk, SOCK_DEAD)) {
493 pr_info("Attempt to release alive unix socket: %p\n", sk);
494 return;
495 }
496
497 if (u->addr)
498 unix_release_addr(u->addr);
499
500 atomic_long_dec(&unix_nr_socks);
501 local_bh_disable();
502 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
503 local_bh_enable();
504 #ifdef UNIX_REFCNT_DEBUG
505 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
506 atomic_long_read(&unix_nr_socks));
507 #endif
508 }
509
510 static void unix_release_sock(struct sock *sk, int embrion)
511 {
512 struct unix_sock *u = unix_sk(sk);
513 struct path path;
514 struct sock *skpair;
515 struct sk_buff *skb;
516 int state;
517
518 unix_remove_socket(sk);
519
520 /* Clear state */
521 unix_state_lock(sk);
522 sock_orphan(sk);
523 sk->sk_shutdown = SHUTDOWN_MASK;
524 path = u->path;
525 u->path.dentry = NULL;
526 u->path.mnt = NULL;
527 state = sk->sk_state;
528 sk->sk_state = TCP_CLOSE;
529 unix_state_unlock(sk);
530
531 wake_up_interruptible_all(&u->peer_wait);
532
533 skpair = unix_peer(sk);
534
535 if (skpair != NULL) {
536 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
537 unix_state_lock(skpair);
538 /* No more writes */
539 skpair->sk_shutdown = SHUTDOWN_MASK;
540 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
541 skpair->sk_err = ECONNRESET;
542 unix_state_unlock(skpair);
543 skpair->sk_state_change(skpair);
544 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
545 }
546
547 unix_dgram_peer_wake_disconnect(sk, skpair);
548 sock_put(skpair); /* It may now die */
549 unix_peer(sk) = NULL;
550 }
551
552 /* Try to flush out this socket. Throw out buffers at least */
553
554 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
555 if (state == TCP_LISTEN)
556 unix_release_sock(skb->sk, 1);
557 /* passed fds are erased in the kfree_skb hook */
558 UNIXCB(skb).consumed = skb->len;
559 kfree_skb(skb);
560 }
561
562 if (path.dentry)
563 path_put(&path);
564
565 sock_put(sk);
566
567 /* ---- Socket is dead now and most probably destroyed ---- */
568
569 /*
570 * Fixme: BSD difference: In BSD all sockets connected to us get
571 * ECONNRESET and we die on the spot. In Linux we behave
572 * like files and pipes do and wait for the last
573 * dereference.
574 *
575 * Can't we simply set sock->err?
576 *
577 * What does the above comment talk about? --ANK(980817)
578 */
579
580 if (unix_tot_inflight)
581 unix_gc(); /* Garbage collect fds */
582 }
583
584 static void init_peercred(struct sock *sk)
585 {
586 put_pid(sk->sk_peer_pid);
587 if (sk->sk_peer_cred)
588 put_cred(sk->sk_peer_cred);
589 sk->sk_peer_pid = get_pid(task_tgid(current));
590 sk->sk_peer_cred = get_current_cred();
591 }
592
593 static void copy_peercred(struct sock *sk, struct sock *peersk)
594 {
595 put_pid(sk->sk_peer_pid);
596 if (sk->sk_peer_cred)
597 put_cred(sk->sk_peer_cred);
598 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
599 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
600 }
601
602 static int unix_listen(struct socket *sock, int backlog)
603 {
604 int err;
605 struct sock *sk = sock->sk;
606 struct unix_sock *u = unix_sk(sk);
607 struct pid *old_pid = NULL;
608
609 err = -EOPNOTSUPP;
610 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
611 goto out; /* Only stream/seqpacket sockets accept */
612 err = -EINVAL;
613 if (!u->addr)
614 goto out; /* No listens on an unbound socket */
615 unix_state_lock(sk);
616 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
617 goto out_unlock;
618 if (backlog > sk->sk_max_ack_backlog)
619 wake_up_interruptible_all(&u->peer_wait);
620 sk->sk_max_ack_backlog = backlog;
621 sk->sk_state = TCP_LISTEN;
622 /* set credentials so connect can copy them */
623 init_peercred(sk);
624 err = 0;
625
626 out_unlock:
627 unix_state_unlock(sk);
628 put_pid(old_pid);
629 out:
630 return err;
631 }
632
633 static int unix_release(struct socket *);
634 static int unix_bind(struct socket *, struct sockaddr *, int);
635 static int unix_stream_connect(struct socket *, struct sockaddr *,
636 int addr_len, int flags);
637 static int unix_socketpair(struct socket *, struct socket *);
638 static int unix_accept(struct socket *, struct socket *, int);
639 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
640 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
641 static unsigned int unix_dgram_poll(struct file *, struct socket *,
642 poll_table *);
643 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
644 static int unix_shutdown(struct socket *, int);
645 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
646 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
647 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
648 size_t size, int flags);
649 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
650 struct pipe_inode_info *, size_t size,
651 unsigned int flags);
652 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
653 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
654 static int unix_dgram_connect(struct socket *, struct sockaddr *,
655 int, int);
656 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
657 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
658 int);
659
660 static int unix_set_peek_off(struct sock *sk, int val)
661 {
662 struct unix_sock *u = unix_sk(sk);
663
664 if (mutex_lock_interruptible(&u->readlock))
665 return -EINTR;
666
667 sk->sk_peek_off = val;
668 mutex_unlock(&u->readlock);
669
670 return 0;
671 }
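/*
 * Editorial note - a minimal userspace sketch of what the hook above
 * services: with SO_PEEK_OFF set, successive MSG_PEEK reads walk forward
 * through the queued data instead of re-reading from the start. "fd" and
 * "buf" are assumed to exist.
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 16..31
 */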
672
673
674 static const struct proto_ops unix_stream_ops = {
675 .family = PF_UNIX,
676 .owner = THIS_MODULE,
677 .release = unix_release,
678 .bind = unix_bind,
679 .connect = unix_stream_connect,
680 .socketpair = unix_socketpair,
681 .accept = unix_accept,
682 .getname = unix_getname,
683 .poll = unix_poll,
684 .ioctl = unix_ioctl,
685 .listen = unix_listen,
686 .shutdown = unix_shutdown,
687 .setsockopt = sock_no_setsockopt,
688 .getsockopt = sock_no_getsockopt,
689 .sendmsg = unix_stream_sendmsg,
690 .recvmsg = unix_stream_recvmsg,
691 .mmap = sock_no_mmap,
692 .sendpage = unix_stream_sendpage,
693 .splice_read = unix_stream_splice_read,
694 .set_peek_off = unix_set_peek_off,
695 };
696
697 static const struct proto_ops unix_dgram_ops = {
698 .family = PF_UNIX,
699 .owner = THIS_MODULE,
700 .release = unix_release,
701 .bind = unix_bind,
702 .connect = unix_dgram_connect,
703 .socketpair = unix_socketpair,
704 .accept = sock_no_accept,
705 .getname = unix_getname,
706 .poll = unix_dgram_poll,
707 .ioctl = unix_ioctl,
708 .listen = sock_no_listen,
709 .shutdown = unix_shutdown,
710 .setsockopt = sock_no_setsockopt,
711 .getsockopt = sock_no_getsockopt,
712 .sendmsg = unix_dgram_sendmsg,
713 .recvmsg = unix_dgram_recvmsg,
714 .mmap = sock_no_mmap,
715 .sendpage = sock_no_sendpage,
716 .set_peek_off = unix_set_peek_off,
717 };
718
719 static const struct proto_ops unix_seqpacket_ops = {
720 .family = PF_UNIX,
721 .owner = THIS_MODULE,
722 .release = unix_release,
723 .bind = unix_bind,
724 .connect = unix_stream_connect,
725 .socketpair = unix_socketpair,
726 .accept = unix_accept,
727 .getname = unix_getname,
728 .poll = unix_dgram_poll,
729 .ioctl = unix_ioctl,
730 .listen = unix_listen,
731 .shutdown = unix_shutdown,
732 .setsockopt = sock_no_setsockopt,
733 .getsockopt = sock_no_getsockopt,
734 .sendmsg = unix_seqpacket_sendmsg,
735 .recvmsg = unix_seqpacket_recvmsg,
736 .mmap = sock_no_mmap,
737 .sendpage = sock_no_sendpage,
738 .set_peek_off = unix_set_peek_off,
739 };
740
741 static struct proto unix_proto = {
742 .name = "UNIX",
743 .owner = THIS_MODULE,
744 .obj_size = sizeof(struct unix_sock),
745 };
746
747 /*
748 * AF_UNIX sockets do not interact with hardware, hence they
749 * don't trigger interrupts - so it's safe for them to have
750 * bh-unsafe locking for their sk_receive_queue.lock. Split off
751 * this special lock-class by reinitializing the spinlock key:
752 */
753 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
754
755 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
756 {
757 struct sock *sk = NULL;
758 struct unix_sock *u;
759
760 atomic_long_inc(&unix_nr_socks);
761 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
762 goto out;
763
764 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
765 if (!sk)
766 goto out;
767
768 sock_init_data(sock, sk);
769 lockdep_set_class(&sk->sk_receive_queue.lock,
770 &af_unix_sk_receive_queue_lock_key);
771
772 sk->sk_write_space = unix_write_space;
773 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
774 sk->sk_destruct = unix_sock_destructor;
775 u = unix_sk(sk);
776 u->path.dentry = NULL;
777 u->path.mnt = NULL;
778 spin_lock_init(&u->lock);
779 atomic_long_set(&u->inflight, 0);
780 INIT_LIST_HEAD(&u->link);
781 mutex_init(&u->readlock); /* single task reading lock */
782 init_waitqueue_head(&u->peer_wait);
783 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
784 unix_insert_socket(unix_sockets_unbound(sk), sk);
785 out:
786 if (sk == NULL)
787 atomic_long_dec(&unix_nr_socks);
788 else {
789 local_bh_disable();
790 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
791 local_bh_enable();
792 }
793 return sk;
794 }
795
796 static int unix_create(struct net *net, struct socket *sock, int protocol,
797 int kern)
798 {
799 if (protocol && protocol != PF_UNIX)
800 return -EPROTONOSUPPORT;
801
802 sock->state = SS_UNCONNECTED;
803
804 switch (sock->type) {
805 case SOCK_STREAM:
806 sock->ops = &unix_stream_ops;
807 break;
808 /*
809 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
810 * nothing uses it.
811 */
812 case SOCK_RAW:
813 sock->type = SOCK_DGRAM; /* fall through */
814 case SOCK_DGRAM:
815 sock->ops = &unix_dgram_ops;
816 break;
817 case SOCK_SEQPACKET:
818 sock->ops = &unix_seqpacket_ops;
819 break;
820 default:
821 return -ESOCKTNOSUPPORT;
822 }
823
824 return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
825 }
826
827 static int unix_release(struct socket *sock)
828 {
829 struct sock *sk = sock->sk;
830
831 if (!sk)
832 return 0;
833
834 unix_release_sock(sk, 0);
835 sock->sk = NULL;
836
837 return 0;
838 }
839
840 static int unix_autobind(struct socket *sock)
841 {
842 struct sock *sk = sock->sk;
843 struct net *net = sock_net(sk);
844 struct unix_sock *u = unix_sk(sk);
845 static u32 ordernum = 1;
846 struct unix_address *addr;
847 int err;
848 unsigned int retries = 0;
849
850 err = mutex_lock_interruptible(&u->readlock);
851 if (err)
852 return err;
853
854 err = 0;
855 if (u->addr)
856 goto out;
857
858 err = -ENOMEM;
859 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
860 if (!addr)
861 goto out;
862
863 addr->name->sun_family = AF_UNIX;
864 atomic_set(&addr->refcnt, 1);
865
866 retry:
867 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
868 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
869
870 spin_lock(&unix_table_lock);
871 ordernum = (ordernum+1)&0xFFFFF;
872
873 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
874 addr->hash)) {
875 spin_unlock(&unix_table_lock);
876 /*
877 * __unix_find_socket_byname() may take a long time if many names
878 * are already in use.
879 */
880 cond_resched();
881 /* Give up if all names seem to be in use. */
882 if (retries++ == 0xFFFFF) {
883 err = -ENOSPC;
884 kfree(addr);
885 goto out;
886 }
887 goto retry;
888 }
889 addr->hash ^= sk->sk_type;
890
891 __unix_remove_socket(sk);
892 u->addr = addr;
893 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
894 spin_unlock(&unix_table_lock);
895 err = 0;
896
897 out: mutex_unlock(&u->readlock);
898 return err;
899 }
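/*
 * Editorial note - a minimal userspace sketch of triggering the autobind
 * above: binding with only sa_family (no name bytes at all) makes the
 * kernel pick a unique five-hex-digit abstract name, which getsockname()
 * can then report. "fd" is assumed to be an unbound AF_UNIX socket.
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(sun);
 *
 *	bind(fd, (struct sockaddr *)&sun, sizeof(sa_family_t));
 *	getsockname(fd, (struct sockaddr *)&sun, &len);
 *	// sun.sun_path now holds "\0xxxxx", xxxxx being the hex ordernum
 */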
900
901 static struct sock *unix_find_other(struct net *net,
902 struct sockaddr_un *sunname, int len,
903 int type, unsigned int hash, int *error)
904 {
905 struct sock *u;
906 struct path path;
907 int err = 0;
908
909 if (sunname->sun_path[0]) {
910 struct inode *inode;
911 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
912 if (err)
913 goto fail;
914 inode = d_backing_inode(path.dentry);
915 err = inode_permission(inode, MAY_WRITE);
916 if (err)
917 goto put_fail;
918
919 err = -ECONNREFUSED;
920 if (!S_ISSOCK(inode->i_mode))
921 goto put_fail;
922 u = unix_find_socket_byinode(inode);
923 if (!u)
924 goto put_fail;
925
926 if (u->sk_type == type)
927 touch_atime(&path);
928
929 path_put(&path);
930
931 err = -EPROTOTYPE;
932 if (u->sk_type != type) {
933 sock_put(u);
934 goto fail;
935 }
936 } else {
937 err = -ECONNREFUSED;
938 u = unix_find_socket_byname(net, sunname, len, type, hash);
939 if (u) {
940 struct dentry *dentry;
941 dentry = unix_sk(u)->path.dentry;
942 if (dentry)
943 touch_atime(&unix_sk(u)->path);
944 } else
945 goto fail;
946 }
947 return u;
948
949 put_fail:
950 path_put(&path);
951 fail:
952 *error = err;
953 return NULL;
954 }
955
956 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
957 {
958 struct dentry *dentry;
959 struct path path;
960 int err = 0;
961 /*
962 * Get the parent directory, calculate the hash for last
963 * component.
964 */
965 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
966 err = PTR_ERR(dentry);
967 if (IS_ERR(dentry))
968 return err;
969
970 /*
971 * All right, let's create it.
972 */
973 err = security_path_mknod(&path, dentry, mode, 0);
974 if (!err) {
975 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
976 if (!err) {
977 res->mnt = mntget(path.mnt);
978 res->dentry = dget(dentry);
979 }
980 }
981 done_path_create(&path, dentry);
982 return err;
983 }
984
985 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
986 {
987 struct sock *sk = sock->sk;
988 struct net *net = sock_net(sk);
989 struct unix_sock *u = unix_sk(sk);
990 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
991 char *sun_path = sunaddr->sun_path;
992 int err;
993 unsigned int hash;
994 struct unix_address *addr;
995 struct hlist_head *list;
996
997 err = -EINVAL;
998 if (sunaddr->sun_family != AF_UNIX)
999 goto out;
1000
1001 if (addr_len == sizeof(short)) {
1002 err = unix_autobind(sock);
1003 goto out;
1004 }
1005
1006 err = unix_mkname(sunaddr, addr_len, &hash);
1007 if (err < 0)
1008 goto out;
1009 addr_len = err;
1010
1011 err = mutex_lock_interruptible(&u->readlock);
1012 if (err)
1013 goto out;
1014
1015 err = -EINVAL;
1016 if (u->addr)
1017 goto out_up;
1018
1019 err = -ENOMEM;
1020 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
1021 if (!addr)
1022 goto out_up;
1023
1024 memcpy(addr->name, sunaddr, addr_len);
1025 addr->len = addr_len;
1026 addr->hash = hash ^ sk->sk_type;
1027 atomic_set(&addr->refcnt, 1);
1028
1029 if (sun_path[0]) {
1030 struct path path;
1031 umode_t mode = S_IFSOCK |
1032 (SOCK_INODE(sock)->i_mode & ~current_umask());
1033 err = unix_mknod(sun_path, mode, &path);
1034 if (err) {
1035 if (err == -EEXIST)
1036 err = -EADDRINUSE;
1037 unix_release_addr(addr);
1038 goto out_up;
1039 }
1040 addr->hash = UNIX_HASH_SIZE;
1041 hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
1042 spin_lock(&unix_table_lock);
1043 u->path = path;
1044 list = &unix_socket_table[hash];
1045 } else {
1046 spin_lock(&unix_table_lock);
1047 err = -EADDRINUSE;
1048 if (__unix_find_socket_byname(net, sunaddr, addr_len,
1049 sk->sk_type, hash)) {
1050 unix_release_addr(addr);
1051 goto out_unlock;
1052 }
1053
1054 list = &unix_socket_table[addr->hash];
1055 }
1056
1057 err = 0;
1058 __unix_remove_socket(sk);
1059 u->addr = addr;
1060 __unix_insert_socket(list, sk);
1061
1062 out_unlock:
1063 spin_unlock(&unix_table_lock);
1064 out_up:
1065 mutex_unlock(&u->readlock);
1066 out:
1067 return err;
1068 }
1069
1070 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
1071 {
1072 if (unlikely(sk1 == sk2) || !sk2) {
1073 unix_state_lock(sk1);
1074 return;
1075 }
1076 if (sk1 < sk2) {
1077 unix_state_lock(sk1);
1078 unix_state_lock_nested(sk2);
1079 } else {
1080 unix_state_lock(sk2);
1081 unix_state_lock_nested(sk1);
1082 }
1083 }
1084
1085 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
1086 {
1087 if (unlikely(sk1 == sk2) || !sk2) {
1088 unix_state_unlock(sk1);
1089 return;
1090 }
1091 unix_state_unlock(sk1);
1092 unix_state_unlock(sk2);
1093 }
1094
1095 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
1096 int alen, int flags)
1097 {
1098 struct sock *sk = sock->sk;
1099 struct net *net = sock_net(sk);
1100 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
1101 struct sock *other;
1102 unsigned int hash;
1103 int err;
1104
1105 if (addr->sa_family != AF_UNSPEC) {
1106 err = unix_mkname(sunaddr, alen, &hash);
1107 if (err < 0)
1108 goto out;
1109 alen = err;
1110
1111 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
1112 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
1113 goto out;
1114
1115 restart:
1116 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1117 if (!other)
1118 goto out;
1119
1120 unix_state_double_lock(sk, other);
1121
1122 /* Apparently VFS overslept socket death. Retry. */
1123 if (sock_flag(other, SOCK_DEAD)) {
1124 unix_state_double_unlock(sk, other);
1125 sock_put(other);
1126 goto restart;
1127 }
1128
1129 err = -EPERM;
1130 if (!unix_may_send(sk, other))
1131 goto out_unlock;
1132
1133 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1134 if (err)
1135 goto out_unlock;
1136
1137 } else {
1138 /*
1139 * 1003.1g breaking connected state with AF_UNSPEC
1140 */
1141 other = NULL;
1142 unix_state_double_lock(sk, other);
1143 }
1144
1145 /*
1146 * If it was connected, reconnect.
1147 */
1148 if (unix_peer(sk)) {
1149 struct sock *old_peer = unix_peer(sk);
1150 unix_peer(sk) = other;
1151 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1152
1153 unix_state_double_unlock(sk, other);
1154
1155 if (other != old_peer)
1156 unix_dgram_disconnected(sk, old_peer);
1157 sock_put(old_peer);
1158 } else {
1159 unix_peer(sk) = other;
1160 unix_state_double_unlock(sk, other);
1161 }
1162 return 0;
1163
1164 out_unlock:
1165 unix_state_double_unlock(sk, other);
1166 sock_put(other);
1167 out:
1168 return err;
1169 }
1170
1171 static long unix_wait_for_peer(struct sock *other, long timeo)
1172 {
1173 struct unix_sock *u = unix_sk(other);
1174 int sched;
1175 DEFINE_WAIT(wait);
1176
1177 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1178
1179 sched = !sock_flag(other, SOCK_DEAD) &&
1180 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1181 unix_recvq_full(other);
1182
1183 unix_state_unlock(other);
1184
1185 if (sched)
1186 timeo = schedule_timeout(timeo);
1187
1188 finish_wait(&u->peer_wait, &wait);
1189 return timeo;
1190 }
1191
1192 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1193 int addr_len, int flags)
1194 {
1195 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1196 struct sock *sk = sock->sk;
1197 struct net *net = sock_net(sk);
1198 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1199 struct sock *newsk = NULL;
1200 struct sock *other = NULL;
1201 struct sk_buff *skb = NULL;
1202 unsigned int hash;
1203 int st;
1204 int err;
1205 long timeo;
1206
1207 err = unix_mkname(sunaddr, addr_len, &hash);
1208 if (err < 0)
1209 goto out;
1210 addr_len = err;
1211
1212 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1213 (err = unix_autobind(sock)) != 0)
1214 goto out;
1215
1216 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1217
1218 /* First of all allocate resources.
1219 If we will make it after state is locked,
1220 we will have to recheck all again in any case.
1221 */
1222
1223 err = -ENOMEM;
1224
1225 /* create new sock for complete connection */
1226 newsk = unix_create1(sock_net(sk), NULL, 0);
1227 if (newsk == NULL)
1228 goto out;
1229
1230 /* Allocate skb for sending to listening sock */
1231 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1232 if (skb == NULL)
1233 goto out;
1234
1235 restart:
1236 /* Find listening sock. */
1237 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1238 if (!other)
1239 goto out;
1240
1241 /* Latch state of peer */
1242 unix_state_lock(other);
1243
1244 /* Apparently VFS overslept socket death. Retry. */
1245 if (sock_flag(other, SOCK_DEAD)) {
1246 unix_state_unlock(other);
1247 sock_put(other);
1248 goto restart;
1249 }
1250
1251 err = -ECONNREFUSED;
1252 if (other->sk_state != TCP_LISTEN)
1253 goto out_unlock;
1254 if (other->sk_shutdown & RCV_SHUTDOWN)
1255 goto out_unlock;
1256
1257 if (unix_recvq_full(other)) {
1258 err = -EAGAIN;
1259 if (!timeo)
1260 goto out_unlock;
1261
1262 timeo = unix_wait_for_peer(other, timeo);
1263
1264 err = sock_intr_errno(timeo);
1265 if (signal_pending(current))
1266 goto out;
1267 sock_put(other);
1268 goto restart;
1269 }
1270
1271 /* Latch our state.
1272
1273 It is a tricky place. We need to grab our state lock and cannot
1274 drop the lock on the peer. It is dangerous because a deadlock is
1275 possible. The connect-to-self case and simultaneous
1276 attempts to connect are eliminated by checking the socket
1277 state. other is TCP_LISTEN; if sk is TCP_LISTEN we
1278 check this before attempting to grab the lock.
1279
1280 Well, and we have to recheck the state after the socket is locked.
1281 */
1282 st = sk->sk_state;
1283
1284 switch (st) {
1285 case TCP_CLOSE:
1286 /* This is ok... continue with connect */
1287 break;
1288 case TCP_ESTABLISHED:
1289 /* Socket is already connected */
1290 err = -EISCONN;
1291 goto out_unlock;
1292 default:
1293 err = -EINVAL;
1294 goto out_unlock;
1295 }
1296
1297 unix_state_lock_nested(sk);
1298
1299 if (sk->sk_state != st) {
1300 unix_state_unlock(sk);
1301 unix_state_unlock(other);
1302 sock_put(other);
1303 goto restart;
1304 }
1305
1306 err = security_unix_stream_connect(sk, other, newsk);
1307 if (err) {
1308 unix_state_unlock(sk);
1309 goto out_unlock;
1310 }
1311
1312 /* The way is open! Quickly set all the necessary fields... */
1313
1314 sock_hold(sk);
1315 unix_peer(newsk) = sk;
1316 newsk->sk_state = TCP_ESTABLISHED;
1317 newsk->sk_type = sk->sk_type;
1318 init_peercred(newsk);
1319 newu = unix_sk(newsk);
1320 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1321 otheru = unix_sk(other);
1322
1323 /* copy address information from listening to new sock*/
1324 if (otheru->addr) {
1325 atomic_inc(&otheru->addr->refcnt);
1326 newu->addr = otheru->addr;
1327 }
1328 if (otheru->path.dentry) {
1329 path_get(&otheru->path);
1330 newu->path = otheru->path;
1331 }
1332
1333 /* Set credentials */
1334 copy_peercred(sk, other);
1335
1336 sock->state = SS_CONNECTED;
1337 sk->sk_state = TCP_ESTABLISHED;
1338 sock_hold(newsk);
1339
1340 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1341 unix_peer(sk) = newsk;
1342
1343 unix_state_unlock(sk);
1344
1345 /* take ten and send info to the listening sock */
1346 spin_lock(&other->sk_receive_queue.lock);
1347 __skb_queue_tail(&other->sk_receive_queue, skb);
1348 spin_unlock(&other->sk_receive_queue.lock);
1349 unix_state_unlock(other);
1350 other->sk_data_ready(other);
1351 sock_put(other);
1352 return 0;
1353
1354 out_unlock:
1355 if (other)
1356 unix_state_unlock(other);
1357
1358 out:
1359 kfree_skb(skb);
1360 if (newsk)
1361 unix_release_sock(newsk, 0);
1362 if (other)
1363 sock_put(other);
1364 return err;
1365 }
1366
1367 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1368 {
1369 struct sock *ska = socka->sk, *skb = sockb->sk;
1370
1371 /* Join our sockets back to back */
1372 sock_hold(ska);
1373 sock_hold(skb);
1374 unix_peer(ska) = skb;
1375 unix_peer(skb) = ska;
1376 init_peercred(ska);
1377 init_peercred(skb);
1378
1379 if (ska->sk_type != SOCK_DGRAM) {
1380 ska->sk_state = TCP_ESTABLISHED;
1381 skb->sk_state = TCP_ESTABLISHED;
1382 socka->state = SS_CONNECTED;
1383 sockb->state = SS_CONNECTED;
1384 }
1385 return 0;
1386 }
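/*
 * Editorial note - the userspace view of the function above, as a
 * minimal sketch:
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0)
 *		write(sv[0], "ping", 4);	// readable on sv[1]
 */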
1387
1388 static void unix_sock_inherit_flags(const struct socket *old,
1389 struct socket *new)
1390 {
1391 if (test_bit(SOCK_PASSCRED, &old->flags))
1392 set_bit(SOCK_PASSCRED, &new->flags);
1393 if (test_bit(SOCK_PASSSEC, &old->flags))
1394 set_bit(SOCK_PASSSEC, &new->flags);
1395 }
1396
1397 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1398 {
1399 struct sock *sk = sock->sk;
1400 struct sock *tsk;
1401 struct sk_buff *skb;
1402 int err;
1403
1404 err = -EOPNOTSUPP;
1405 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1406 goto out;
1407
1408 err = -EINVAL;
1409 if (sk->sk_state != TCP_LISTEN)
1410 goto out;
1411
1412 /* If socket state is TCP_LISTEN it cannot change (for now...),
1413 * so that no locks are necessary.
1414 */
1415
1416 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1417 if (!skb) {
1418 /* This means receive shutdown. */
1419 if (err == 0)
1420 err = -EINVAL;
1421 goto out;
1422 }
1423
1424 tsk = skb->sk;
1425 skb_free_datagram(sk, skb);
1426 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1427
1428 /* attach accepted sock to socket */
1429 unix_state_lock(tsk);
1430 newsock->state = SS_CONNECTED;
1431 unix_sock_inherit_flags(sock, newsock);
1432 sock_graft(tsk, newsock);
1433 unix_state_unlock(tsk);
1434 return 0;
1435
1436 out:
1437 return err;
1438 }
1439
1440
1441 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1442 {
1443 struct sock *sk = sock->sk;
1444 struct unix_sock *u;
1445 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1446 int err = 0;
1447
1448 if (peer) {
1449 sk = unix_peer_get(sk);
1450
1451 err = -ENOTCONN;
1452 if (!sk)
1453 goto out;
1454 err = 0;
1455 } else {
1456 sock_hold(sk);
1457 }
1458
1459 u = unix_sk(sk);
1460 unix_state_lock(sk);
1461 if (!u->addr) {
1462 sunaddr->sun_family = AF_UNIX;
1463 sunaddr->sun_path[0] = 0;
1464 *uaddr_len = sizeof(short);
1465 } else {
1466 struct unix_address *addr = u->addr;
1467
1468 *uaddr_len = addr->len;
1469 memcpy(sunaddr, addr->name, *uaddr_len);
1470 }
1471 unix_state_unlock(sk);
1472 sock_put(sk);
1473 out:
1474 return err;
1475 }
1476
1477 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1478 {
1479 int i;
1480
1481 scm->fp = UNIXCB(skb).fp;
1482 UNIXCB(skb).fp = NULL;
1483
1484 for (i = scm->fp->count-1; i >= 0; i--)
1485 unix_notinflight(scm->fp->fp[i]);
1486 }
1487
1488 static void unix_destruct_scm(struct sk_buff *skb)
1489 {
1490 struct scm_cookie scm;
1491 memset(&scm, 0, sizeof(scm));
1492 scm.pid = UNIXCB(skb).pid;
1493 if (UNIXCB(skb).fp)
1494 unix_detach_fds(&scm, skb);
1495
1496 /* Alas, it calls VFS */
1497 /* So fscking what? fput() had been SMP-safe since the last Summer */
1498 scm_destroy(&scm);
1499 sock_wfree(skb);
1500 }
1501
1502 #define MAX_RECURSION_LEVEL 4
1503
1504 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1505 {
1506 int i;
1507 unsigned char max_level = 0;
1508 int unix_sock_count = 0;
1509
1510 for (i = scm->fp->count - 1; i >= 0; i--) {
1511 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1512
1513 if (sk) {
1514 unix_sock_count++;
1515 max_level = max(max_level,
1516 unix_sk(sk)->recursion_level);
1517 }
1518 }
1519 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1520 return -ETOOMANYREFS;
1521
1522 /*
1523 * Need to duplicate file references for the sake of garbage
1524 * collection. Otherwise a socket in the fps might become a
1525 * candidate for GC while the skb is not yet queued.
1526 */
1527 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1528 if (!UNIXCB(skb).fp)
1529 return -ENOMEM;
1530
1531 if (unix_sock_count) {
1532 for (i = scm->fp->count - 1; i >= 0; i--)
1533 unix_inflight(scm->fp->fp[i]);
1534 }
1535 return max_level;
1536 }
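/*
 * Editorial note - an illustrative userspace sketch of the SCM_RIGHTS
 * traffic that unix_attach_fds() accounts for; the duplicated references
 * taken above are what keeps such in-flight descriptors visible to the
 * garbage collector. "sock_fd" and "fd_to_send" are assumed to exist.
 *
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = "x", .iov_len = 1 };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */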
1537
1538 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1539 {
1540 int err = 0;
1541
1542 UNIXCB(skb).pid = get_pid(scm->pid);
1543 UNIXCB(skb).uid = scm->creds.uid;
1544 UNIXCB(skb).gid = scm->creds.gid;
1545 UNIXCB(skb).fp = NULL;
1546 unix_get_secdata(scm, skb);
1547 if (scm->fp && send_fds)
1548 err = unix_attach_fds(scm, skb);
1549
1550 skb->destructor = unix_destruct_scm;
1551 return err;
1552 }
1553
1554 /*
1555 * Some apps rely on write() giving SCM_CREDENTIALS
1556 * We include credentials if source or destination socket
1557 * asserted SOCK_PASSCRED.
1558 */
1559 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1560 const struct sock *other)
1561 {
1562 if (UNIXCB(skb).pid)
1563 return;
1564 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1565 !other->sk_socket ||
1566 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1567 UNIXCB(skb).pid = get_pid(task_tgid(current));
1568 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1569 }
1570 }
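/*
 * Editorial note - a minimal userspace sketch of the receiver side that
 * the credential passing above serves. With SO_PASSCRED enabled,
 * recvmsg() yields a SOL_SOCKET/SCM_CREDENTIALS cmsg carrying the
 * sender's struct ucred (pid, uid, gid). "fd" is assumed to exist.
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));
 *	// each subsequent recvmsg() now carries a cmsg with
 *	// cmsg_level == SOL_SOCKET and cmsg_type == SCM_CREDENTIALS,
 *	// whose data is a struct ucred
 */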
1571
1572 /*
1573 * Send AF_UNIX data.
1574 */
1575
1576 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1577 size_t len)
1578 {
1579 struct sock *sk = sock->sk;
1580 struct net *net = sock_net(sk);
1581 struct unix_sock *u = unix_sk(sk);
1582 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1583 struct sock *other = NULL;
1584 int namelen = 0; /* fake GCC */
1585 int err;
1586 unsigned int hash;
1587 struct sk_buff *skb;
1588 long timeo;
1589 struct scm_cookie scm;
1590 int max_level;
1591 int data_len = 0;
1592 int sk_locked;
1593
1594 wait_for_unix_gc();
1595 err = scm_send(sock, msg, &scm, false);
1596 if (err < 0)
1597 return err;
1598
1599 err = -EOPNOTSUPP;
1600 if (msg->msg_flags&MSG_OOB)
1601 goto out;
1602
1603 if (msg->msg_namelen) {
1604 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1605 if (err < 0)
1606 goto out;
1607 namelen = err;
1608 } else {
1609 sunaddr = NULL;
1610 err = -ENOTCONN;
1611 other = unix_peer_get(sk);
1612 if (!other)
1613 goto out;
1614 }
1615
1616 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1617 && (err = unix_autobind(sock)) != 0)
1618 goto out;
1619
1620 err = -EMSGSIZE;
1621 if (len > sk->sk_sndbuf - 32)
1622 goto out;
1623
1624 if (len > SKB_MAX_ALLOC) {
1625 data_len = min_t(size_t,
1626 len - SKB_MAX_ALLOC,
1627 MAX_SKB_FRAGS * PAGE_SIZE);
1628 data_len = PAGE_ALIGN(data_len);
1629
1630 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1631 }
1632
1633 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1634 msg->msg_flags & MSG_DONTWAIT, &err,
1635 PAGE_ALLOC_COSTLY_ORDER);
1636 if (skb == NULL)
1637 goto out;
1638
1639 err = unix_scm_to_skb(&scm, skb, true);
1640 if (err < 0)
1641 goto out_free;
1642 max_level = err + 1;
1643
1644 skb_put(skb, len - data_len);
1645 skb->data_len = data_len;
1646 skb->len = len;
1647 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1648 if (err)
1649 goto out_free;
1650
1651 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1652
1653 restart:
1654 if (!other) {
1655 err = -ECONNRESET;
1656 if (sunaddr == NULL)
1657 goto out_free;
1658
1659 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1660 hash, &err);
1661 if (other == NULL)
1662 goto out_free;
1663 }
1664
1665 if (sk_filter(other, skb) < 0) {
1666 /* Toss the packet but do not return any error to the sender */
1667 err = len;
1668 goto out_free;
1669 }
1670
1671 sk_locked = 0;
1672 unix_state_lock(other);
1673 restart_locked:
1674 err = -EPERM;
1675 if (!unix_may_send(sk, other))
1676 goto out_unlock;
1677
1678 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1679 /*
1680 * Check with 1003.1g - what should
1681 * datagram error
1682 */
1683 unix_state_unlock(other);
1684 sock_put(other);
1685
1686 if (!sk_locked)
1687 unix_state_lock(sk);
1688
1689 err = 0;
1690 if (unix_peer(sk) == other) {
1691 unix_peer(sk) = NULL;
1692 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1693
1694 unix_state_unlock(sk);
1695
1696 unix_dgram_disconnected(sk, other);
1697 sock_put(other);
1698 err = -ECONNREFUSED;
1699 } else {
1700 unix_state_unlock(sk);
1701 }
1702
1703 other = NULL;
1704 if (err)
1705 goto out_free;
1706 goto restart;
1707 }
1708
1709 err = -EPIPE;
1710 if (other->sk_shutdown & RCV_SHUTDOWN)
1711 goto out_unlock;
1712
1713 if (sk->sk_type != SOCK_SEQPACKET) {
1714 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1715 if (err)
1716 goto out_unlock;
1717 }
1718
1719 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1720 if (timeo) {
1721 timeo = unix_wait_for_peer(other, timeo);
1722
1723 err = sock_intr_errno(timeo);
1724 if (signal_pending(current))
1725 goto out_free;
1726
1727 goto restart;
1728 }
1729
1730 if (!sk_locked) {
1731 unix_state_unlock(other);
1732 unix_state_double_lock(sk, other);
1733 }
1734
1735 if (unix_peer(sk) != other ||
1736 unix_dgram_peer_wake_me(sk, other)) {
1737 err = -EAGAIN;
1738 sk_locked = 1;
1739 goto out_unlock;
1740 }
1741
1742 if (!sk_locked) {
1743 sk_locked = 1;
1744 goto restart_locked;
1745 }
1746 }
1747
1748 if (unlikely(sk_locked))
1749 unix_state_unlock(sk);
1750
1751 if (sock_flag(other, SOCK_RCVTSTAMP))
1752 __net_timestamp(skb);
1753 maybe_add_creds(skb, sock, other);
1754 skb_queue_tail(&other->sk_receive_queue, skb);
1755 if (max_level > unix_sk(other)->recursion_level)
1756 unix_sk(other)->recursion_level = max_level;
1757 unix_state_unlock(other);
1758 other->sk_data_ready(other);
1759 sock_put(other);
1760 scm_destroy(&scm);
1761 return len;
1762
1763 out_unlock:
1764 if (sk_locked)
1765 unix_state_unlock(sk);
1766 unix_state_unlock(other);
1767 out_free:
1768 kfree_skb(skb);
1769 out:
1770 if (other)
1771 sock_put(other);
1772 scm_destroy(&scm);
1773 return err;
1774 }
1775
1776 /* We use paged skbs for stream sockets, and limit occupancy to 32768
1777 * bytes, and a minimum of a full page.
1778 */
1779 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
1780
1781 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1782 size_t len)
1783 {
1784 struct sock *sk = sock->sk;
1785 struct sock *other = NULL;
1786 int err, size;
1787 struct sk_buff *skb;
1788 int sent = 0;
1789 struct scm_cookie scm;
1790 bool fds_sent = false;
1791 int max_level;
1792 int data_len;
1793
1794 wait_for_unix_gc();
1795 err = scm_send(sock, msg, &scm, false);
1796 if (err < 0)
1797 return err;
1798
1799 err = -EOPNOTSUPP;
1800 if (msg->msg_flags&MSG_OOB)
1801 goto out_err;
1802
1803 if (msg->msg_namelen) {
1804 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1805 goto out_err;
1806 } else {
1807 err = -ENOTCONN;
1808 other = unix_peer(sk);
1809 if (!other)
1810 goto out_err;
1811 }
1812
1813 if (sk->sk_shutdown & SEND_SHUTDOWN)
1814 goto pipe_err;
1815
1816 while (sent < len) {
1817 size = len - sent;
1818
1819 /* Keep two messages in the pipe so it schedules better */
1820 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1821
1822 /* allow fallback to order-0 allocations */
1823 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1824
1825 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1826
1827 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1828
1829 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1830 msg->msg_flags & MSG_DONTWAIT, &err,
1831 get_order(UNIX_SKB_FRAGS_SZ));
1832 if (!skb)
1833 goto out_err;
1834
1835 /* Only send the fds in the first buffer */
1836 err = unix_scm_to_skb(&scm, skb, !fds_sent);
1837 if (err < 0) {
1838 kfree_skb(skb);
1839 goto out_err;
1840 }
1841 max_level = err + 1;
1842 fds_sent = true;
1843
1844 skb_put(skb, size - data_len);
1845 skb->data_len = data_len;
1846 skb->len = size;
1847 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1848 if (err) {
1849 kfree_skb(skb);
1850 goto out_err;
1851 }
1852
1853 unix_state_lock(other);
1854
1855 if (sock_flag(other, SOCK_DEAD) ||
1856 (other->sk_shutdown & RCV_SHUTDOWN))
1857 goto pipe_err_free;
1858
1859 maybe_add_creds(skb, sock, other);
1860 skb_queue_tail(&other->sk_receive_queue, skb);
1861 if (max_level > unix_sk(other)->recursion_level)
1862 unix_sk(other)->recursion_level = max_level;
1863 unix_state_unlock(other);
1864 other->sk_data_ready(other);
1865 sent += size;
1866 }
1867
1868 scm_destroy(&scm);
1869
1870 return sent;
1871
1872 pipe_err_free:
1873 unix_state_unlock(other);
1874 kfree_skb(skb);
1875 pipe_err:
1876 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1877 send_sig(SIGPIPE, current, 0);
1878 err = -EPIPE;
1879 out_err:
1880 scm_destroy(&scm);
1881 return sent ? : err;
1882 }
1883
1884 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1885 int offset, size_t size, int flags)
1886 {
1887 int err = 0;
1888 bool send_sigpipe = true;
1889 struct sock *other, *sk = socket->sk;
1890 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1891
1892 if (flags & MSG_OOB)
1893 return -EOPNOTSUPP;
1894
1895 other = unix_peer(sk);
1896 if (!other || sk->sk_state != TCP_ESTABLISHED)
1897 return -ENOTCONN;
1898
1899 if (false) {
1900 alloc_skb:
1901 unix_state_unlock(other);
1902 mutex_unlock(&unix_sk(other)->readlock);
1903 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1904 &err, 0);
1905 if (!newskb)
1906 return err;
1907 }
1908
1909 /* we must acquire readlock as we modify already present
1910 * skbs in the sk_receive_queue and mess with skb->len
1911 */
1912 err = mutex_lock_interruptible(&unix_sk(other)->readlock);
1913 if (err) {
1914 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1915 send_sigpipe = false;
1916 goto err;
1917 }
1918
1919 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1920 err = -EPIPE;
1921 goto err_unlock;
1922 }
1923
1924 unix_state_lock(other);
1925
1926 if (sock_flag(other, SOCK_DEAD) ||
1927 other->sk_shutdown & RCV_SHUTDOWN) {
1928 err = -EPIPE;
1929 goto err_state_unlock;
1930 }
1931
1932 skb = skb_peek_tail(&other->sk_receive_queue);
1933 if (tail && tail == skb) {
1934 skb = newskb;
1935 } else if (!skb) {
1936 if (newskb)
1937 skb = newskb;
1938 else
1939 goto alloc_skb;
1940 } else if (newskb) {
1941 /* this is the fast path; we don't strictly need to guard
1942 * the consume_skb() call, since calling it with
1943 * newskb == NULL does no harm
1944 */
1945 consume_skb(newskb);
1946 newskb = NULL;
1947 }
1948
1949 if (skb_append_pagefrags(skb, page, offset, size)) {
1950 tail = skb;
1951 goto alloc_skb;
1952 }
1953
1954 skb->len += size;
1955 skb->data_len += size;
1956 skb->truesize += size;
1957 atomic_add(size, &sk->sk_wmem_alloc);
1958
1959 if (newskb) {
1960 spin_lock(&other->sk_receive_queue.lock);
1961 __skb_queue_tail(&other->sk_receive_queue, newskb);
1962 spin_unlock(&other->sk_receive_queue.lock);
1963 }
1964
1965 unix_state_unlock(other);
1966 mutex_unlock(&unix_sk(other)->readlock);
1967
1968 other->sk_data_ready(other);
1969
1970 return size;
1971
1972 err_state_unlock:
1973 unix_state_unlock(other);
1974 err_unlock:
1975 mutex_unlock(&unix_sk(other)->readlock);
1976 err:
1977 kfree_skb(newskb);
1978 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1979 send_sig(SIGPIPE, current, 0);
1980 return err;
1981 }
1982
1983 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1984 size_t len)
1985 {
1986 int err;
1987 struct sock *sk = sock->sk;
1988
1989 err = sock_error(sk);
1990 if (err)
1991 return err;
1992
1993 if (sk->sk_state != TCP_ESTABLISHED)
1994 return -ENOTCONN;
1995
1996 if (msg->msg_namelen)
1997 msg->msg_namelen = 0;
1998
1999 return unix_dgram_sendmsg(sock, msg, len);
2000 }
2001
2002 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
2003 size_t size, int flags)
2004 {
2005 struct sock *sk = sock->sk;
2006
2007 if (sk->sk_state != TCP_ESTABLISHED)
2008 return -ENOTCONN;
2009
2010 return unix_dgram_recvmsg(sock, msg, size, flags);
2011 }
2012
2013 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
2014 {
2015 struct unix_sock *u = unix_sk(sk);
2016
2017 if (u->addr) {
2018 msg->msg_namelen = u->addr->len;
2019 memcpy(msg->msg_name, u->addr->name, u->addr->len);
2020 }
2021 }
2022
2023 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
2024 size_t size, int flags)
2025 {
2026 struct scm_cookie scm;
2027 struct sock *sk = sock->sk;
2028 struct unix_sock *u = unix_sk(sk);
2029 int noblock = flags & MSG_DONTWAIT;
2030 struct sk_buff *skb;
2031 int err;
2032 int peeked, skip;
2033
2034 err = -EOPNOTSUPP;
2035 if (flags&MSG_OOB)
2036 goto out;
2037
2038 err = mutex_lock_interruptible(&u->readlock);
2039 if (unlikely(err)) {
2040 /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
2041 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2042 */
2043 err = noblock ? -EAGAIN : -ERESTARTSYS;
2044 goto out;
2045 }
2046
2047 skip = sk_peek_offset(sk, flags);
2048
2049 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
2050 if (!skb) {
2051 unix_state_lock(sk);
2052 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
2053 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
2054 (sk->sk_shutdown & RCV_SHUTDOWN))
2055 err = 0;
2056 unix_state_unlock(sk);
2057 goto out_unlock;
2058 }
2059
2060 wake_up_interruptible_sync_poll(&u->peer_wait,
2061 POLLOUT | POLLWRNORM | POLLWRBAND);
2062
2063 if (msg->msg_name)
2064 unix_copy_addr(msg, skb->sk);
2065
2066 if (size > skb->len - skip)
2067 size = skb->len - skip;
2068 else if (size < skb->len - skip)
2069 msg->msg_flags |= MSG_TRUNC;
2070
2071 err = skb_copy_datagram_msg(skb, skip, msg, size);
2072 if (err)
2073 goto out_free;
2074
2075 if (sock_flag(sk, SOCK_RCVTSTAMP))
2076 __sock_recv_timestamp(msg, sk, skb);
2077
2078 memset(&scm, 0, sizeof(scm));
2079
2080 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2081 unix_set_secdata(&scm, skb);
2082
2083 if (!(flags & MSG_PEEK)) {
2084 if (UNIXCB(skb).fp)
2085 unix_detach_fds(&scm, skb);
2086
2087 sk_peek_offset_bwd(sk, skb->len);
2088 } else {
2089 		/* It is questionable: on PEEK we could:
2090 		   - not return fds (good, but too simple 8))
2091 		   - return fds, but not return them again on read (the old
2092 		     strategy, apparently wrong)
2093 		   - clone fds (chosen here; it is the most universal
2094 		     solution)
2095
2096 		   POSIX 1003.1g does not actually define this clearly
2097 		   at all. Then again, POSIX 1003.1g doesn't define a lot
2098 		   of things clearly!
2099
2100 		*/
2101
2102 sk_peek_offset_fwd(sk, size);
2103
2104 if (UNIXCB(skb).fp)
2105 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2106 }
2107 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
2108
2109 scm_recv(sock, msg, &scm, flags);
2110
2111 out_free:
2112 skb_free_datagram(sk, skb);
2113 out_unlock:
2114 mutex_unlock(&u->readlock);
2115 out:
2116 return err;
2117 }
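/*
 * Userspace sketch of the fd-cloning strategy chosen above (the
 * "sock" fd is hypothetical; error handling elided): every peek of a
 * datagram carrying SCM_RIGHTS installs a fresh duplicate of the
 * passed descriptor via scm_fp_dup(), so the receiver must close
 * each one it is handed.
 *
 *	char data[1];
 *	char cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *
 *	recvmsg(sock, &msg, MSG_PEEK);	/* dups the fd, skb stays queued */
 *	recvmsg(sock, &msg, 0);		/* dups it again and dequeues */
 */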
2118
2119 /*
2120  *	Sleep until more data has arrived. But check for races.
2121 */
2122 static long unix_stream_data_wait(struct sock *sk, long timeo,
2123 struct sk_buff *last, unsigned int last_len)
2124 {
2125 struct sk_buff *tail;
2126 DEFINE_WAIT(wait);
2127
2128 unix_state_lock(sk);
2129
2130 for (;;) {
2131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2132
2133 tail = skb_peek_tail(&sk->sk_receive_queue);
2134 if (tail != last ||
2135 (tail && tail->len != last_len) ||
2136 sk->sk_err ||
2137 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2138 signal_pending(current) ||
2139 !timeo)
2140 break;
2141
2142 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2143 unix_state_unlock(sk);
2144 timeo = freezable_schedule_timeout(timeo);
2145 unix_state_lock(sk);
2146
2147 if (sock_flag(sk, SOCK_DEAD))
2148 break;
2149
2150 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2151 }
2152
2153 finish_wait(sk_sleep(sk), &wait);
2154 unix_state_unlock(sk);
2155 return timeo;
2156 }
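/*
 * The timeout consumed above comes from sock_rcvtimeo(); a userspace
 * sketch (hypothetical "sock" fd): with SO_RCVTIMEO set, a blocking
 * read on an idle stream socket returns once timeo drains, failing
 * with EAGAIN/EWOULDBLOCK rather than sleeping forever.
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 0 };
 *	char buf[64];
 *
 *	setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *	if (read(sock, buf, sizeof(buf)) < 0 && errno == EAGAIN)
 *		;	/* timed out after ~2 seconds */
 */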
2157
2158 static unsigned int unix_skb_len(const struct sk_buff *skb)
2159 {
2160 return skb->len - UNIXCB(skb).consumed;
2161 }
2162
2163 struct unix_stream_read_state {
2164 int (*recv_actor)(struct sk_buff *, int, int,
2165 struct unix_stream_read_state *);
2166 struct socket *socket;
2167 struct msghdr *msg;
2168 struct pipe_inode_info *pipe;
2169 size_t size;
2170 int flags;
2171 unsigned int splice_flags;
2172 };
2173
2174 static int unix_stream_read_generic(struct unix_stream_read_state *state)
2175 {
2176 struct scm_cookie scm;
2177 struct socket *sock = state->socket;
2178 struct sock *sk = sock->sk;
2179 struct unix_sock *u = unix_sk(sk);
2180 int copied = 0;
2181 int flags = state->flags;
2182 int noblock = flags & MSG_DONTWAIT;
2183 bool check_creds = false;
2184 int target;
2185 int err = 0;
2186 long timeo;
2187 int skip;
2188 size_t size = state->size;
2189 unsigned int last_len;
2190
2191 err = -EINVAL;
2192 if (sk->sk_state != TCP_ESTABLISHED)
2193 goto out;
2194
2195 err = -EOPNOTSUPP;
2196 if (flags & MSG_OOB)
2197 goto out;
2198
2199 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2200 timeo = sock_rcvtimeo(sk, noblock);
2201
2202 memset(&scm, 0, sizeof(scm));
2203
2204 	/* Lock the socket to prevent the queue from being reordered
2205 	 * while we sleep copying data out to userspace
2206 */
2207 err = mutex_lock_interruptible(&u->readlock);
2208 if (unlikely(err)) {
2209 		/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
2210 		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2211 */
2212 err = noblock ? -EAGAIN : -ERESTARTSYS;
2213 goto out;
2214 }
2215
2216 if (flags & MSG_PEEK)
2217 skip = sk_peek_offset(sk, flags);
2218 else
2219 skip = 0;
2220
2221 do {
2222 int chunk;
2223 bool drop_skb;
2224 struct sk_buff *skb, *last;
2225
2226 unix_state_lock(sk);
2227 if (sock_flag(sk, SOCK_DEAD)) {
2228 err = -ECONNRESET;
2229 goto unlock;
2230 }
2231 last = skb = skb_peek(&sk->sk_receive_queue);
2232 last_len = last ? last->len : 0;
2233 again:
2234 if (skb == NULL) {
2235 unix_sk(sk)->recursion_level = 0;
2236 if (copied >= target)
2237 goto unlock;
2238
2239 /*
2240 * POSIX 1003.1g mandates this order.
2241 */
2242
2243 err = sock_error(sk);
2244 if (err)
2245 goto unlock;
2246 if (sk->sk_shutdown & RCV_SHUTDOWN)
2247 goto unlock;
2248
2249 unix_state_unlock(sk);
2250 err = -EAGAIN;
2251 if (!timeo)
2252 break;
2253 mutex_unlock(&u->readlock);
2254
2255 timeo = unix_stream_data_wait(sk, timeo, last,
2256 last_len);
2257
2258 if (signal_pending(current) ||
2259 mutex_lock_interruptible(&u->readlock)) {
2260 err = sock_intr_errno(timeo);
2261 goto out;
2262 }
2263
2264 continue;
2265 unlock:
2266 unix_state_unlock(sk);
2267 break;
2268 }
2269
2270 while (skip >= unix_skb_len(skb)) {
2271 skip -= unix_skb_len(skb);
2272 last = skb;
2273 last_len = skb->len;
2274 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2275 if (!skb)
2276 goto again;
2277 }
2278
2279 unix_state_unlock(sk);
2280
2281 if (check_creds) {
2282 /* Never glue messages from different writers */
2283 if ((UNIXCB(skb).pid != scm.pid) ||
2284 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2285 !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
2286 !unix_secdata_eq(&scm, skb))
2287 break;
2288 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2289 /* Copy credentials */
2290 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2291 unix_set_secdata(&scm, skb);
2292 check_creds = true;
2293 }
2294
2295 /* Copy address just once */
2296 if (state->msg && state->msg->msg_name) {
2297 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2298 state->msg->msg_name);
2299 unix_copy_addr(state->msg, skb->sk);
2300 sunaddr = NULL;
2301 }
2302
2303 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2304 skb_get(skb);
2305 chunk = state->recv_actor(skb, skip, chunk, state);
2306 drop_skb = !unix_skb_len(skb);
2307 /* skb is only safe to use if !drop_skb */
2308 consume_skb(skb);
2309 if (chunk < 0) {
2310 if (copied == 0)
2311 copied = -EFAULT;
2312 break;
2313 }
2314 copied += chunk;
2315 size -= chunk;
2316
2317 if (drop_skb) {
2318 /* the skb was touched by a concurrent reader;
2319 * we should not expect anything from this skb
2320 			 * anymore and must assume it is invalid - we can be
2321 			 * sure it was dropped from the socket queue
2322 			 *
2323 			 * so let's report a short read
2324 */
2325 err = 0;
2326 break;
2327 }
2328
2329 /* Mark read part of skb as used */
2330 if (!(flags & MSG_PEEK)) {
2331 UNIXCB(skb).consumed += chunk;
2332
2333 sk_peek_offset_bwd(sk, chunk);
2334
2335 if (UNIXCB(skb).fp)
2336 unix_detach_fds(&scm, skb);
2337
2338 if (unix_skb_len(skb))
2339 break;
2340
2341 skb_unlink(skb, &sk->sk_receive_queue);
2342 consume_skb(skb);
2343
2344 if (scm.fp)
2345 break;
2346 } else {
2347 			/* It is questionable; see the note in unix_dgram_recvmsg().
2348 */
2349 if (UNIXCB(skb).fp)
2350 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2351
2352 sk_peek_offset_fwd(sk, chunk);
2353
2354 if (UNIXCB(skb).fp)
2355 break;
2356
2357 skip = 0;
2358 last = skb;
2359 last_len = skb->len;
2360 unix_state_lock(sk);
2361 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2362 if (skb)
2363 goto again;
2364 unix_state_unlock(sk);
2365 break;
2366 }
2367 } while (size);
2368
2369 mutex_unlock(&u->readlock);
2370 if (state->msg)
2371 scm_recv(sock, state->msg, &scm, flags);
2372 else
2373 scm_destroy(&scm);
2374 out:
2375 return copied ? : err;
2376 }
2377
2378 static int unix_stream_read_actor(struct sk_buff *skb,
2379 int skip, int chunk,
2380 struct unix_stream_read_state *state)
2381 {
2382 int ret;
2383
2384 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2385 state->msg, chunk);
2386 return ret ?: chunk;
2387 }
2388
2389 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2390 size_t size, int flags)
2391 {
2392 struct unix_stream_read_state state = {
2393 .recv_actor = unix_stream_read_actor,
2394 .socket = sock,
2395 .msg = msg,
2396 .size = size,
2397 .flags = flags
2398 };
2399
2400 return unix_stream_read_generic(&state);
2401 }
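/*
 * Sketch of the sk_peek_offset() handling above from userspace (the
 * "sock" fd is hypothetical): once SO_PEEK_OFF is enabled, successive
 * MSG_PEEK reads walk forward through the queued stream instead of
 * re-reading the head of the queue.
 *
 *	int off = 0;
 *	char a[4], b[4];
 *
 *	setsockopt(sock, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(sock, a, 4, MSG_PEEK);	/* peeks bytes 0..3 */
 *	recv(sock, b, 4, MSG_PEEK);	/* peeks bytes 4..7; data stays queued */
 */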
2402
2403 static ssize_t skb_unix_socket_splice(struct sock *sk,
2404 struct pipe_inode_info *pipe,
2405 struct splice_pipe_desc *spd)
2406 {
2407 int ret;
2408 struct unix_sock *u = unix_sk(sk);
2409
2410 mutex_unlock(&u->readlock);
2411 ret = splice_to_pipe(pipe, spd);
2412 mutex_lock(&u->readlock);
2413
2414 return ret;
2415 }
2416
2417 static int unix_stream_splice_actor(struct sk_buff *skb,
2418 int skip, int chunk,
2419 struct unix_stream_read_state *state)
2420 {
2421 return skb_splice_bits(skb, state->socket->sk,
2422 UNIXCB(skb).consumed + skip,
2423 state->pipe, chunk, state->splice_flags,
2424 skb_unix_socket_splice);
2425 }
2426
2427 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2428 struct pipe_inode_info *pipe,
2429 size_t size, unsigned int flags)
2430 {
2431 struct unix_stream_read_state state = {
2432 .recv_actor = unix_stream_splice_actor,
2433 .socket = sock,
2434 .pipe = pipe,
2435 .size = size,
2436 .splice_flags = flags,
2437 };
2438
2439 if (unlikely(*ppos))
2440 return -ESPIPE;
2441
2442 if (sock->file->f_flags & O_NONBLOCK ||
2443 flags & SPLICE_F_NONBLOCK)
2444 state.flags = MSG_DONTWAIT;
2445
2446 return unix_stream_read_generic(&state);
2447 }
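/*
 * Userspace sketch of this splice path (hypothetical fds; error
 * handling elided): queued stream data moves into a pipe without a
 * copy through userspace, and SPLICE_F_NONBLOCK maps to MSG_DONTWAIT
 * exactly as unix_stream_splice_read() does above.
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(sock, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */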
2448
2449 static int unix_shutdown(struct socket *sock, int mode)
2450 {
2451 struct sock *sk = sock->sk;
2452 struct sock *other;
2453
2454 if (mode < SHUT_RD || mode > SHUT_RDWR)
2455 return -EINVAL;
2456 /* This maps:
2457 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2458 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2459 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2460 */
2461 ++mode;
2462
2463 unix_state_lock(sk);
2464 sk->sk_shutdown |= mode;
2465 other = unix_peer(sk);
2466 if (other)
2467 sock_hold(other);
2468 unix_state_unlock(sk);
2469 sk->sk_state_change(sk);
2470
2471 if (other &&
2472 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2473
2474 int peer_mode = 0;
2475
2476 		if (mode & RCV_SHUTDOWN)
2477 			peer_mode |= SEND_SHUTDOWN;
2478 		if (mode & SEND_SHUTDOWN)
2479 peer_mode |= RCV_SHUTDOWN;
2480 unix_state_lock(other);
2481 other->sk_shutdown |= peer_mode;
2482 unix_state_unlock(other);
2483 other->sk_state_change(other);
2484 if (peer_mode == SHUTDOWN_MASK)
2485 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2486 else if (peer_mode & RCV_SHUTDOWN)
2487 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2488 }
2489 if (other)
2490 sock_put(other);
2491
2492 return 0;
2493 }
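/*
 * Caller-side sketch of the SHUT_* mapping above (hypothetical
 * connected "sock"): SHUT_WR (1) becomes SEND_SHUTDOWN (2) on this
 * end and RCV_SHUTDOWN on the peer, so the peer reads EOF once its
 * queue drains while this end may still receive.
 *
 *	shutdown(sock, SHUT_WR);
 *	/* peer: read() now returns 0 after the queued data is gone */
 */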
2494
2495 long unix_inq_len(struct sock *sk)
2496 {
2497 struct sk_buff *skb;
2498 long amount = 0;
2499
2500 if (sk->sk_state == TCP_LISTEN)
2501 return -EINVAL;
2502
2503 spin_lock(&sk->sk_receive_queue.lock);
2504 if (sk->sk_type == SOCK_STREAM ||
2505 sk->sk_type == SOCK_SEQPACKET) {
2506 skb_queue_walk(&sk->sk_receive_queue, skb)
2507 amount += unix_skb_len(skb);
2508 } else {
2509 skb = skb_peek(&sk->sk_receive_queue);
2510 if (skb)
2511 amount = skb->len;
2512 }
2513 spin_unlock(&sk->sk_receive_queue.lock);
2514
2515 return amount;
2516 }
2517 EXPORT_SYMBOL_GPL(unix_inq_len);
2518
2519 long unix_outq_len(struct sock *sk)
2520 {
2521 return sk_wmem_alloc_get(sk);
2522 }
2523 EXPORT_SYMBOL_GPL(unix_outq_len);
2524
2525 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2526 {
2527 struct sock *sk = sock->sk;
2528 long amount = 0;
2529 int err;
2530
2531 switch (cmd) {
2532 case SIOCOUTQ:
2533 amount = unix_outq_len(sk);
2534 err = put_user(amount, (int __user *)arg);
2535 break;
2536 case SIOCINQ:
2537 amount = unix_inq_len(sk);
2538 if (amount < 0)
2539 err = amount;
2540 else
2541 err = put_user(amount, (int __user *)arg);
2542 break;
2543 default:
2544 err = -ENOIOCTLCMD;
2545 break;
2546 }
2547 return err;
2548 }
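/*
 * Userspace sketch (hypothetical "sock" fd): SIOCINQ reports unread
 * queued bytes via unix_inq_len(), SIOCOUTQ reports bytes sent but
 * not yet consumed by the peer via unix_outq_len().
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int unread, unsent;
 *
 *	ioctl(sock, SIOCINQ, &unread);
 *	ioctl(sock, SIOCOUTQ, &unsent);
 */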
2549
2550 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2551 {
2552 struct sock *sk = sock->sk;
2553 unsigned int mask;
2554
2555 sock_poll_wait(file, sk_sleep(sk), wait);
2556 mask = 0;
2557
2558 /* exceptional events? */
2559 if (sk->sk_err)
2560 mask |= POLLERR;
2561 if (sk->sk_shutdown == SHUTDOWN_MASK)
2562 mask |= POLLHUP;
2563 if (sk->sk_shutdown & RCV_SHUTDOWN)
2564 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2565
2566 /* readable? */
2567 if (!skb_queue_empty(&sk->sk_receive_queue))
2568 mask |= POLLIN | POLLRDNORM;
2569
2570 	/* Connection-based sockets need to check for termination and startup */
2571 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2572 sk->sk_state == TCP_CLOSE)
2573 mask |= POLLHUP;
2574
2575 /*
2576 * we set writable also when the other side has shut down the
2577 * connection. This prevents stuck sockets.
2578 */
2579 if (unix_writable(sk))
2580 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2581
2582 return mask;
2583 }
2584
2585 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2586 poll_table *wait)
2587 {
2588 struct sock *sk = sock->sk, *other;
2589 unsigned int mask, writable;
2590
2591 sock_poll_wait(file, sk_sleep(sk), wait);
2592 mask = 0;
2593
2594 /* exceptional events? */
2595 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2596 mask |= POLLERR |
2597 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2598
2599 if (sk->sk_shutdown & RCV_SHUTDOWN)
2600 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2601 if (sk->sk_shutdown == SHUTDOWN_MASK)
2602 mask |= POLLHUP;
2603
2604 /* readable? */
2605 if (!skb_queue_empty(&sk->sk_receive_queue))
2606 mask |= POLLIN | POLLRDNORM;
2607
2608 	/* Connection-based sockets need to check for termination and startup */
2609 if (sk->sk_type == SOCK_SEQPACKET) {
2610 if (sk->sk_state == TCP_CLOSE)
2611 mask |= POLLHUP;
2612 /* connection hasn't started yet? */
2613 if (sk->sk_state == TCP_SYN_SENT)
2614 return mask;
2615 }
2616
2617 /* No write status requested, avoid expensive OUT tests. */
2618 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2619 return mask;
2620
2621 writable = unix_writable(sk);
2622 if (writable) {
2623 unix_state_lock(sk);
2624
2625 other = unix_peer(sk);
2626 if (other && unix_peer(other) != sk &&
2627 unix_recvq_full(other) &&
2628 unix_dgram_peer_wake_me(sk, other))
2629 writable = 0;
2630
2631 unix_state_unlock(sk);
2632 }
2633
2634 if (writable)
2635 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2636 else
2637 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2638
2639 return mask;
2640 }
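/*
 * Sketch of the datagram poll semantics above (hypothetical connected
 * "sock"): POLLOUT is withheld while the peer's receive queue is full,
 * so a poll() loop waits for the peer to drain instead of spinning on
 * -EAGAIN sends.
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);	/* woken via peer_wait when writable */
 *	if (pfd.revents & POLLOUT)
 *		send(sock, "x", 1, MSG_DONTWAIT);
 */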
2641
2642 #ifdef CONFIG_PROC_FS
2643
2644 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2645
2646 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2647 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2648 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
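/*
 * Worked example (assuming UNIX_HASH_BITS == 8 on a 64-bit build):
 * BUCKET_SPACE = 64 - 9 - 1 = 54, so set_bucket_offset(3, 2) encodes
 * bucket 3 at in-bucket offset 2 as (3 << 54) | 2, and get_bucket()/
 * get_offset() recover the two halves of the seq_file position.
 */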
2649
2650 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2651 {
2652 unsigned long offset = get_offset(*pos);
2653 unsigned long bucket = get_bucket(*pos);
2654 struct sock *sk;
2655 unsigned long count = 0;
2656
2657 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2658 if (sock_net(sk) != seq_file_net(seq))
2659 continue;
2660 if (++count == offset)
2661 break;
2662 }
2663
2664 return sk;
2665 }
2666
2667 static struct sock *unix_next_socket(struct seq_file *seq,
2668 struct sock *sk,
2669 loff_t *pos)
2670 {
2671 unsigned long bucket;
2672
2673 while (sk > (struct sock *)SEQ_START_TOKEN) {
2674 sk = sk_next(sk);
2675 if (!sk)
2676 goto next_bucket;
2677 if (sock_net(sk) == seq_file_net(seq))
2678 return sk;
2679 }
2680
2681 do {
2682 sk = unix_from_bucket(seq, pos);
2683 if (sk)
2684 return sk;
2685
2686 next_bucket:
2687 bucket = get_bucket(*pos) + 1;
2688 *pos = set_bucket_offset(bucket, 1);
2689 } while (bucket < ARRAY_SIZE(unix_socket_table));
2690
2691 return NULL;
2692 }
2693
2694 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2695 __acquires(unix_table_lock)
2696 {
2697 spin_lock(&unix_table_lock);
2698
2699 if (!*pos)
2700 return SEQ_START_TOKEN;
2701
2702 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2703 return NULL;
2704
2705 return unix_next_socket(seq, NULL, pos);
2706 }
2707
2708 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2709 {
2710 ++*pos;
2711 return unix_next_socket(seq, v, pos);
2712 }
2713
2714 static void unix_seq_stop(struct seq_file *seq, void *v)
2715 __releases(unix_table_lock)
2716 {
2717 spin_unlock(&unix_table_lock);
2718 }
2719
2720 static int unix_seq_show(struct seq_file *seq, void *v)
2721 {
2722
2723 if (v == SEQ_START_TOKEN)
2724 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2725 "Inode Path\n");
2726 else {
2727 struct sock *s = v;
2728 struct unix_sock *u = unix_sk(s);
2729 unix_state_lock(s);
2730
2731 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2732 s,
2733 atomic_read(&s->sk_refcnt),
2734 0,
2735 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2736 s->sk_type,
2737 s->sk_socket ?
2738 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2739 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2740 sock_i_ino(s));
2741
2742 if (u->addr) {
2743 int i, len;
2744 seq_putc(seq, ' ');
2745
2746 i = 0;
2747 len = u->addr->len - sizeof(short);
2748 if (!UNIX_ABSTRACT(s))
2749 len--;
2750 else {
2751 seq_putc(seq, '@');
2752 i++;
2753 }
2754 for ( ; i < len; i++)
2755 seq_putc(seq, u->addr->name->sun_path[i]);
2756 }
2757 unix_state_unlock(s);
2758 seq_putc(seq, '\n');
2759 }
2760
2761 return 0;
2762 }
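/*
 * A line emitted by the format above looks roughly like this
 * (illustrative values; the address is hashed by %pK):
 *
 *	ffff8800b8f21c00: 00000002 00000000 00010000 0001 01 17933 /run/systemd/journal/stdout
 *
 * i.e. address, refcount, protocol (always 0), flags (__SO_ACCEPTCON
 * for listeners), type, state, inode, then the bound path, with
 * abstract names shown '@'-prefixed.
 */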
2763
2764 static const struct seq_operations unix_seq_ops = {
2765 .start = unix_seq_start,
2766 .next = unix_seq_next,
2767 .stop = unix_seq_stop,
2768 .show = unix_seq_show,
2769 };
2770
2771 static int unix_seq_open(struct inode *inode, struct file *file)
2772 {
2773 return seq_open_net(inode, file, &unix_seq_ops,
2774 sizeof(struct seq_net_private));
2775 }
2776
2777 static const struct file_operations unix_seq_fops = {
2778 .owner = THIS_MODULE,
2779 .open = unix_seq_open,
2780 .read = seq_read,
2781 .llseek = seq_lseek,
2782 .release = seq_release_net,
2783 };
2784
2785 #endif
2786
2787 static const struct net_proto_family unix_family_ops = {
2788 .family = PF_UNIX,
2789 .create = unix_create,
2790 .owner = THIS_MODULE,
2791 };
2792
2793
2794 static int __net_init unix_net_init(struct net *net)
2795 {
2796 int error = -ENOMEM;
2797
2798 net->unx.sysctl_max_dgram_qlen = 10;
2799 if (unix_sysctl_register(net))
2800 goto out;
2801
2802 #ifdef CONFIG_PROC_FS
2803 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2804 unix_sysctl_unregister(net);
2805 goto out;
2806 }
2807 #endif
2808 error = 0;
2809 out:
2810 return error;
2811 }
2812
2813 static void __net_exit unix_net_exit(struct net *net)
2814 {
2815 unix_sysctl_unregister(net);
2816 remove_proc_entry("unix", net->proc_net);
2817 }
2818
2819 static struct pernet_operations unix_net_ops = {
2820 .init = unix_net_init,
2821 .exit = unix_net_exit,
2822 };
2823
2824 static int __init af_unix_init(void)
2825 {
2826 int rc = -1;
2827
2828 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2829
2830 rc = proto_register(&unix_proto, 1);
2831 if (rc != 0) {
2832 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2833 goto out;
2834 }
2835
2836 sock_register(&unix_family_ops);
2837 register_pernet_subsys(&unix_net_ops);
2838 out:
2839 return rc;
2840 }
2841
2842 static void __exit af_unix_exit(void)
2843 {
2844 sock_unregister(PF_UNIX);
2845 proto_unregister(&unix_proto);
2846 unregister_pernet_subsys(&unix_net_ops);
2847 }
2848
2849 /* Earlier than device_initcall() so that other drivers invoking
2850    request_module() don't end up in a loop when modprobe tries
2851    to use a UNIX socket. But later than subsys_initcall() because
2852    we depend on stuff initialised there. */
2853 fs_initcall(af_unix_init);
2854 module_exit(af_unix_exit);
2855
2856 MODULE_LICENSE("GPL");
2857 MODULE_ALIAS_NETPROTO(PF_UNIX);