af_unix: don't append consumed skbs to sk_receive_queue
net/unix/af_unix.c
1 /*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko Eißfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid a huge amount
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, nor give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
82
83 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
84
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
95 #include <linux/un.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/net_namespace.h>
107 #include <net/sock.h>
108 #include <net/tcp_states.h>
109 #include <net/af_unix.h>
110 #include <linux/proc_fs.h>
111 #include <linux/seq_file.h>
112 #include <net/scm.h>
113 #include <linux/init.h>
114 #include <linux/poll.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119 #include <linux/freezer.h>
120
121 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
122 EXPORT_SYMBOL_GPL(unix_socket_table);
123 DEFINE_SPINLOCK(unix_table_lock);
124 EXPORT_SYMBOL_GPL(unix_table_lock);
125 static atomic_long_t unix_nr_socks;
126
127
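/* Unbound sockets live in the second half of unix_socket_table; the
 * bucket is chosen by folding the socket's kernel address into
 * UNIX_HASH_SIZE slots.
 */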
128 static struct hlist_head *unix_sockets_unbound(void *addr)
129 {
130 unsigned long hash = (unsigned long)addr;
131
132 hash ^= hash >> 16;
133 hash ^= hash >> 8;
134 hash %= UNIX_HASH_SIZE;
135 return &unix_socket_table[UNIX_HASH_SIZE + hash];
136 }
137
138 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
139
140 #ifdef CONFIG_SECURITY_NETWORK
141 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
142 {
143 UNIXCB(skb).secid = scm->secid;
144 }
145
146 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
147 {
148 scm->secid = UNIXCB(skb).secid;
149 }
150
151 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
152 {
153 return (scm->secid == UNIXCB(skb).secid);
154 }
155 #else
156 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
157 { }
158
159 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
160 { }
161
162 static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
163 {
164 return true;
165 }
166 #endif /* CONFIG_SECURITY_NETWORK */
167
168 /*
169 * SMP locking strategy:
170 * hash table is protected with spinlock unix_table_lock
171 * each socket state is protected by separate spin lock.
172 */
173
174 static inline unsigned int unix_hash_fold(__wsum n)
175 {
176 unsigned int hash = (__force unsigned int)csum_fold(n);
177
178 hash ^= hash>>8;
179 return hash&(UNIX_HASH_SIZE-1);
180 }
181
182 #define unix_peer(sk) (unix_sk(sk)->peer)
183
184 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
185 {
186 return unix_peer(osk) == sk;
187 }
188
189 static inline int unix_may_send(struct sock *sk, struct sock *osk)
190 {
191 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
192 }
193
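/* The receive queue length doubles as the flow-control limit: senders
 * and connecting peers block in unix_wait_for_peer() once the queue
 * exceeds sk_max_ack_backlog.
 */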
194 static inline int unix_recvq_full(struct sock const *sk)
195 {
196 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
197 }
198
199 struct sock *unix_peer_get(struct sock *s)
200 {
201 struct sock *peer;
202
203 unix_state_lock(s);
204 peer = unix_peer(s);
205 if (peer)
206 sock_hold(peer);
207 unix_state_unlock(s);
208 return peer;
209 }
210 EXPORT_SYMBOL_GPL(unix_peer_get);
211
212 static inline void unix_release_addr(struct unix_address *addr)
213 {
214 if (atomic_dec_and_test(&addr->refcnt))
215 kfree(addr);
216 }
217
218 /*
219 * Check unix socket name:
220 * - should not be zero length.
221 * - if it starts with a non-zero byte, it should be NULL terminated (FS object)
222 * - if it starts with a zero byte, it is an abstract name.
223 */
224
225 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
226 {
227 if (len <= sizeof(short) || len > sizeof(*sunaddr))
228 return -EINVAL;
229 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
230 return -EINVAL;
231 if (sunaddr->sun_path[0]) {
232 /*
233 * This may look like an off by one error but it is a bit more
234 * subtle. 108 is the longest valid AF_UNIX path for a binding.
235 * sun_path[108] doesn't as such exist. However in kernel space
236 * we are guaranteed that it is a valid memory location in our
237 * kernel address buffer.
238 */
239 ((char *)sunaddr)[len] = 0;
240 len = strlen(sunaddr->sun_path)+1+sizeof(short);
241 return len;
242 }
243
244 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
245 return len;
246 }
247
248 static void __unix_remove_socket(struct sock *sk)
249 {
250 sk_del_node_init(sk);
251 }
252
253 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
254 {
255 WARN_ON(!sk_unhashed(sk));
256 sk_add_node(sk, list);
257 }
258
259 static inline void unix_remove_socket(struct sock *sk)
260 {
261 spin_lock(&unix_table_lock);
262 __unix_remove_socket(sk);
263 spin_unlock(&unix_table_lock);
264 }
265
266 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
267 {
268 spin_lock(&unix_table_lock);
269 __unix_insert_socket(list, sk);
270 spin_unlock(&unix_table_lock);
271 }
272
273 static struct sock *__unix_find_socket_byname(struct net *net,
274 struct sockaddr_un *sunname,
275 int len, int type, unsigned int hash)
276 {
277 struct sock *s;
278
279 sk_for_each(s, &unix_socket_table[hash ^ type]) {
280 struct unix_sock *u = unix_sk(s);
281
282 if (!net_eq(sock_net(s), net))
283 continue;
284
285 if (u->addr->len == len &&
286 !memcmp(u->addr->name, sunname, len))
287 goto found;
288 }
289 s = NULL;
290 found:
291 return s;
292 }
293
294 static inline struct sock *unix_find_socket_byname(struct net *net,
295 struct sockaddr_un *sunname,
296 int len, int type,
297 unsigned int hash)
298 {
299 struct sock *s;
300
301 spin_lock(&unix_table_lock);
302 s = __unix_find_socket_byname(net, sunname, len, type, hash);
303 if (s)
304 sock_hold(s);
305 spin_unlock(&unix_table_lock);
306 return s;
307 }
308
309 static struct sock *unix_find_socket_byinode(struct inode *i)
310 {
311 struct sock *s;
312
313 spin_lock(&unix_table_lock);
314 sk_for_each(s,
315 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
316 struct dentry *dentry = unix_sk(s)->path.dentry;
317
318 if (dentry && d_backing_inode(dentry) == i) {
319 sock_hold(s);
320 goto found;
321 }
322 }
323 s = NULL;
324 found:
325 spin_unlock(&unix_table_lock);
326 return s;
327 }
328
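/* A socket is writable while it is not listening and its outstanding
 * write memory is at most a quarter of sk_sndbuf (note the << 2).
 */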
329 static int unix_writable(const struct sock *sk)
330 {
331 return sk->sk_state != TCP_LISTEN &&
332 (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
333 }
334
335 static void unix_write_space(struct sock *sk)
336 {
337 struct socket_wq *wq;
338
339 rcu_read_lock();
340 if (unix_writable(sk)) {
341 wq = rcu_dereference(sk->sk_wq);
342 if (wq_has_sleeper(wq))
343 wake_up_interruptible_sync_poll(&wq->wait,
344 POLLOUT | POLLWRNORM | POLLWRBAND);
345 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
346 }
347 rcu_read_unlock();
348 }
349
350 /* When a dgram socket disconnects (or changes its peer), we clear its receive
351 * queue of packets that arrived from the previous peer. First, this allows
352 * flow control based only on wmem_alloc; second, a sk connected to a peer
353 * may receive messages only from that peer. */
354 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
355 {
356 if (!skb_queue_empty(&sk->sk_receive_queue)) {
357 skb_queue_purge(&sk->sk_receive_queue);
358 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
359
360 /* If one link of a bidirectional dgram pipe is disconnected,
361 * we signal an error. Messages are lost. Do not do this
362 * when the peer was not connected to us.
363 */
364 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
365 other->sk_err = ECONNRESET;
366 other->sk_error_report(other);
367 }
368 }
369 }
370
371 static void unix_sock_destructor(struct sock *sk)
372 {
373 struct unix_sock *u = unix_sk(sk);
374
375 skb_queue_purge(&sk->sk_receive_queue);
376
377 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
378 WARN_ON(!sk_unhashed(sk));
379 WARN_ON(sk->sk_socket);
380 if (!sock_flag(sk, SOCK_DEAD)) {
381 pr_info("Attempt to release alive unix socket: %p\n", sk);
382 return;
383 }
384
385 if (u->addr)
386 unix_release_addr(u->addr);
387
388 atomic_long_dec(&unix_nr_socks);
389 local_bh_disable();
390 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
391 local_bh_enable();
392 #ifdef UNIX_REFCNT_DEBUG
393 pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
394 atomic_long_read(&unix_nr_socks));
395 #endif
396 }
397
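/* Tear down one socket: unhash it, mark it dead, shut down and signal
 * the peer (stream/seqpacket peers get ECONNRESET if data was still
 * queued), then purge the receive queue. A dying listener recurses
 * once per embryonic socket still parked on its queue.
 */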
398 static void unix_release_sock(struct sock *sk, int embrion)
399 {
400 struct unix_sock *u = unix_sk(sk);
401 struct path path;
402 struct sock *skpair;
403 struct sk_buff *skb;
404 int state;
405
406 unix_remove_socket(sk);
407
408 /* Clear state */
409 unix_state_lock(sk);
410 sock_orphan(sk);
411 sk->sk_shutdown = SHUTDOWN_MASK;
412 path = u->path;
413 u->path.dentry = NULL;
414 u->path.mnt = NULL;
415 state = sk->sk_state;
416 sk->sk_state = TCP_CLOSE;
417 unix_state_unlock(sk);
418
419 wake_up_interruptible_all(&u->peer_wait);
420
421 skpair = unix_peer(sk);
422
423 if (skpair != NULL) {
424 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
425 unix_state_lock(skpair);
426 /* No more writes */
427 skpair->sk_shutdown = SHUTDOWN_MASK;
428 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
429 skpair->sk_err = ECONNRESET;
430 unix_state_unlock(skpair);
431 skpair->sk_state_change(skpair);
432 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
433 }
434 sock_put(skpair); /* It may now die */
435 unix_peer(sk) = NULL;
436 }
437
438 /* Try to flush out this socket. Throw out buffers at least */
439
440 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
441 if (state == TCP_LISTEN)
442 unix_release_sock(skb->sk, 1);
443 /* passed fds are erased in the kfree_skb hook */
444 UNIXCB(skb).consumed = skb->len;
445 kfree_skb(skb);
446 }
447
448 if (path.dentry)
449 path_put(&path);
450
451 sock_put(sk);
452
453 /* ---- Socket is dead now and most probably destroyed ---- */
454
455 /*
456 * Fixme: BSD difference: In BSD all sockets connected to us get
457 * ECONNRESET and we die on the spot. In Linux we behave
458 * like files and pipes do and wait for the last
459 * dereference.
460 *
461 * Can't we simply set sock->err?
462 *
463 * What the above comment does talk about? --ANK(980817)
464 */
465
466 if (unix_tot_inflight)
467 unix_gc(); /* Garbage collect fds */
468 }
469
470 static void init_peercred(struct sock *sk)
471 {
472 put_pid(sk->sk_peer_pid);
473 if (sk->sk_peer_cred)
474 put_cred(sk->sk_peer_cred);
475 sk->sk_peer_pid = get_pid(task_tgid(current));
476 sk->sk_peer_cred = get_current_cred();
477 }
478
479 static void copy_peercred(struct sock *sk, struct sock *peersk)
480 {
481 put_pid(sk->sk_peer_pid);
482 if (sk->sk_peer_cred)
483 put_cred(sk->sk_peer_cred);
484 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
485 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
486 }
487
488 static int unix_listen(struct socket *sock, int backlog)
489 {
490 int err;
491 struct sock *sk = sock->sk;
492 struct unix_sock *u = unix_sk(sk);
493 struct pid *old_pid = NULL;
494
495 err = -EOPNOTSUPP;
496 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
497 goto out; /* Only stream/seqpacket sockets accept */
498 err = -EINVAL;
499 if (!u->addr)
500 goto out; /* No listens on an unbound socket */
501 unix_state_lock(sk);
502 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
503 goto out_unlock;
504 if (backlog > sk->sk_max_ack_backlog)
505 wake_up_interruptible_all(&u->peer_wait);
506 sk->sk_max_ack_backlog = backlog;
507 sk->sk_state = TCP_LISTEN;
508 /* set credentials so connect can copy them */
509 init_peercred(sk);
510 err = 0;
511
512 out_unlock:
513 unix_state_unlock(sk);
514 put_pid(old_pid);
515 out:
516 return err;
517 }
518
519 static int unix_release(struct socket *);
520 static int unix_bind(struct socket *, struct sockaddr *, int);
521 static int unix_stream_connect(struct socket *, struct sockaddr *,
522 int addr_len, int flags);
523 static int unix_socketpair(struct socket *, struct socket *);
524 static int unix_accept(struct socket *, struct socket *, int);
525 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
526 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
527 static unsigned int unix_dgram_poll(struct file *, struct socket *,
528 poll_table *);
529 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
530 static int unix_shutdown(struct socket *, int);
531 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
532 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
533 static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
534 size_t size, int flags);
535 static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
536 struct pipe_inode_info *, size_t size,
537 unsigned int flags);
538 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
539 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
540 static int unix_dgram_connect(struct socket *, struct sockaddr *,
541 int, int);
542 static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
543 static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
544 int);
545
546 static int unix_set_peek_off(struct sock *sk, int val)
547 {
548 struct unix_sock *u = unix_sk(sk);
549
550 if (mutex_lock_interruptible(&u->readlock))
551 return -EINTR;
552
553 sk->sk_peek_off = val;
554 mutex_unlock(&u->readlock);
555
556 return 0;
557 }
558
559
560 static const struct proto_ops unix_stream_ops = {
561 .family = PF_UNIX,
562 .owner = THIS_MODULE,
563 .release = unix_release,
564 .bind = unix_bind,
565 .connect = unix_stream_connect,
566 .socketpair = unix_socketpair,
567 .accept = unix_accept,
568 .getname = unix_getname,
569 .poll = unix_poll,
570 .ioctl = unix_ioctl,
571 .listen = unix_listen,
572 .shutdown = unix_shutdown,
573 .setsockopt = sock_no_setsockopt,
574 .getsockopt = sock_no_getsockopt,
575 .sendmsg = unix_stream_sendmsg,
576 .recvmsg = unix_stream_recvmsg,
577 .mmap = sock_no_mmap,
578 .sendpage = unix_stream_sendpage,
579 .splice_read = unix_stream_splice_read,
580 .set_peek_off = unix_set_peek_off,
581 };
582
583 static const struct proto_ops unix_dgram_ops = {
584 .family = PF_UNIX,
585 .owner = THIS_MODULE,
586 .release = unix_release,
587 .bind = unix_bind,
588 .connect = unix_dgram_connect,
589 .socketpair = unix_socketpair,
590 .accept = sock_no_accept,
591 .getname = unix_getname,
592 .poll = unix_dgram_poll,
593 .ioctl = unix_ioctl,
594 .listen = sock_no_listen,
595 .shutdown = unix_shutdown,
596 .setsockopt = sock_no_setsockopt,
597 .getsockopt = sock_no_getsockopt,
598 .sendmsg = unix_dgram_sendmsg,
599 .recvmsg = unix_dgram_recvmsg,
600 .mmap = sock_no_mmap,
601 .sendpage = sock_no_sendpage,
602 .set_peek_off = unix_set_peek_off,
603 };
604
605 static const struct proto_ops unix_seqpacket_ops = {
606 .family = PF_UNIX,
607 .owner = THIS_MODULE,
608 .release = unix_release,
609 .bind = unix_bind,
610 .connect = unix_stream_connect,
611 .socketpair = unix_socketpair,
612 .accept = unix_accept,
613 .getname = unix_getname,
614 .poll = unix_dgram_poll,
615 .ioctl = unix_ioctl,
616 .listen = unix_listen,
617 .shutdown = unix_shutdown,
618 .setsockopt = sock_no_setsockopt,
619 .getsockopt = sock_no_getsockopt,
620 .sendmsg = unix_seqpacket_sendmsg,
621 .recvmsg = unix_seqpacket_recvmsg,
622 .mmap = sock_no_mmap,
623 .sendpage = sock_no_sendpage,
624 .set_peek_off = unix_set_peek_off,
625 };
626
627 static struct proto unix_proto = {
628 .name = "UNIX",
629 .owner = THIS_MODULE,
630 .obj_size = sizeof(struct unix_sock),
631 };
632
633 /*
634 * AF_UNIX sockets do not interact with hardware, hence they
635 * don't trigger interrupts - so it's safe for them to have
636 * bh-unsafe locking for their sk_receive_queue.lock. Split off
637 * this special lock-class by reinitializing the spinlock key:
638 */
639 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
640
641 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
642 {
643 struct sock *sk = NULL;
644 struct unix_sock *u;
645
646 atomic_long_inc(&unix_nr_socks);
647 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
648 goto out;
649
650 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
651 if (!sk)
652 goto out;
653
654 sock_init_data(sock, sk);
655 lockdep_set_class(&sk->sk_receive_queue.lock,
656 &af_unix_sk_receive_queue_lock_key);
657
658 sk->sk_write_space = unix_write_space;
659 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
660 sk->sk_destruct = unix_sock_destructor;
661 u = unix_sk(sk);
662 u->path.dentry = NULL;
663 u->path.mnt = NULL;
664 spin_lock_init(&u->lock);
665 atomic_long_set(&u->inflight, 0);
666 INIT_LIST_HEAD(&u->link);
667 mutex_init(&u->readlock); /* single task reading lock */
668 init_waitqueue_head(&u->peer_wait);
669 unix_insert_socket(unix_sockets_unbound(sk), sk);
670 out:
671 if (sk == NULL)
672 atomic_long_dec(&unix_nr_socks);
673 else {
674 local_bh_disable();
675 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
676 local_bh_enable();
677 }
678 return sk;
679 }
680
681 static int unix_create(struct net *net, struct socket *sock, int protocol,
682 int kern)
683 {
684 if (protocol && protocol != PF_UNIX)
685 return -EPROTONOSUPPORT;
686
687 sock->state = SS_UNCONNECTED;
688
689 switch (sock->type) {
690 case SOCK_STREAM:
691 sock->ops = &unix_stream_ops;
692 break;
693 /*
694 * Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
695 * nothing uses it.
696 */
697 case SOCK_RAW:
698 sock->type = SOCK_DGRAM;
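/* fall through */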
699 case SOCK_DGRAM:
700 sock->ops = &unix_dgram_ops;
701 break;
702 case SOCK_SEQPACKET:
703 sock->ops = &unix_seqpacket_ops;
704 break;
705 default:
706 return -ESOCKTNOSUPPORT;
707 }
708
709 return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
710 }
711
712 static int unix_release(struct socket *sock)
713 {
714 struct sock *sk = sock->sk;
715
716 if (!sk)
717 return 0;
718
719 unix_release_sock(sk, 0);
720 sock->sk = NULL;
721
722 return 0;
723 }
724
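/* Bind an anonymous socket to an abstract name of the form "\0XXXXX"
 * (five hex digits), retrying up to 0xFFFFF times until a free name
 * is found.
 */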
725 static int unix_autobind(struct socket *sock)
726 {
727 struct sock *sk = sock->sk;
728 struct net *net = sock_net(sk);
729 struct unix_sock *u = unix_sk(sk);
730 static u32 ordernum = 1;
731 struct unix_address *addr;
732 int err;
733 unsigned int retries = 0;
734
735 err = mutex_lock_interruptible(&u->readlock);
736 if (err)
737 return err;
738
739 err = 0;
740 if (u->addr)
741 goto out;
742
743 err = -ENOMEM;
744 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
745 if (!addr)
746 goto out;
747
748 addr->name->sun_family = AF_UNIX;
749 atomic_set(&addr->refcnt, 1);
750
751 retry:
752 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
753 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
754
755 spin_lock(&unix_table_lock);
756 ordernum = (ordernum+1)&0xFFFFF;
757
758 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
759 addr->hash)) {
760 spin_unlock(&unix_table_lock);
761 /*
762 * __unix_find_socket_byname() may take long time if many names
763 * are already in use.
764 */
765 cond_resched();
766 /* Give up if all names seem to be in use. */
767 if (retries++ == 0xFFFFF) {
768 err = -ENOSPC;
769 kfree(addr);
770 goto out;
771 }
772 goto retry;
773 }
774 addr->hash ^= sk->sk_type;
775
776 __unix_remove_socket(sk);
777 u->addr = addr;
778 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
779 spin_unlock(&unix_table_lock);
780 err = 0;
781
782 out: mutex_unlock(&u->readlock);
783 return err;
784 }
785
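/* Resolve the peer a name refers to: filesystem names go through a
 * path walk and the inode hash, abstract names through the name hash.
 * Returns a referenced sock, or NULL with *error set.
 */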
786 static struct sock *unix_find_other(struct net *net,
787 struct sockaddr_un *sunname, int len,
788 int type, unsigned int hash, int *error)
789 {
790 struct sock *u;
791 struct path path;
792 int err = 0;
793
794 if (sunname->sun_path[0]) {
795 struct inode *inode;
796 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
797 if (err)
798 goto fail;
799 inode = d_backing_inode(path.dentry);
800 err = inode_permission(inode, MAY_WRITE);
801 if (err)
802 goto put_fail;
803
804 err = -ECONNREFUSED;
805 if (!S_ISSOCK(inode->i_mode))
806 goto put_fail;
807 u = unix_find_socket_byinode(inode);
808 if (!u)
809 goto put_fail;
810
811 if (u->sk_type == type)
812 touch_atime(&path);
813
814 path_put(&path);
815
816 err = -EPROTOTYPE;
817 if (u->sk_type != type) {
818 sock_put(u);
819 goto fail;
820 }
821 } else {
822 err = -ECONNREFUSED;
823 u = unix_find_socket_byname(net, sunname, len, type, hash);
824 if (u) {
825 struct dentry *dentry;
826 dentry = unix_sk(u)->path.dentry;
827 if (dentry)
828 touch_atime(&unix_sk(u)->path);
829 } else
830 goto fail;
831 }
832 return u;
833
834 put_fail:
835 path_put(&path);
836 fail:
837 *error = err;
838 return NULL;
839 }
840
841 static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
842 {
843 struct dentry *dentry;
844 struct path path;
845 int err = 0;
846 /*
847 * Get the parent directory, calculate the hash for last
848 * component.
849 */
850 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
851 err = PTR_ERR(dentry);
852 if (IS_ERR(dentry))
853 return err;
854
855 /*
856 * All right, let's create it.
857 */
858 err = security_path_mknod(&path, dentry, mode, 0);
859 if (!err) {
860 err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
861 if (!err) {
862 res->mnt = mntget(path.mnt);
863 res->dentry = dget(dentry);
864 }
865 }
866 done_path_create(&path, dentry);
867 return err;
868 }
869
870 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
871 {
872 struct sock *sk = sock->sk;
873 struct net *net = sock_net(sk);
874 struct unix_sock *u = unix_sk(sk);
875 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
876 char *sun_path = sunaddr->sun_path;
877 int err;
878 unsigned int hash;
879 struct unix_address *addr;
880 struct hlist_head *list;
881
882 err = -EINVAL;
883 if (sunaddr->sun_family != AF_UNIX)
884 goto out;
885
886 if (addr_len == sizeof(short)) {
887 err = unix_autobind(sock);
888 goto out;
889 }
890
891 err = unix_mkname(sunaddr, addr_len, &hash);
892 if (err < 0)
893 goto out;
894 addr_len = err;
895
896 err = mutex_lock_interruptible(&u->readlock);
897 if (err)
898 goto out;
899
900 err = -EINVAL;
901 if (u->addr)
902 goto out_up;
903
904 err = -ENOMEM;
905 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
906 if (!addr)
907 goto out_up;
908
909 memcpy(addr->name, sunaddr, addr_len);
910 addr->len = addr_len;
911 addr->hash = hash ^ sk->sk_type;
912 atomic_set(&addr->refcnt, 1);
913
914 if (sun_path[0]) {
915 struct path path;
916 umode_t mode = S_IFSOCK |
917 (SOCK_INODE(sock)->i_mode & ~current_umask());
918 err = unix_mknod(sun_path, mode, &path);
919 if (err) {
920 if (err == -EEXIST)
921 err = -EADDRINUSE;
922 unix_release_addr(addr);
923 goto out_up;
924 }
925 addr->hash = UNIX_HASH_SIZE;
926 hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
927 spin_lock(&unix_table_lock);
928 u->path = path;
929 list = &unix_socket_table[hash];
930 } else {
931 spin_lock(&unix_table_lock);
932 err = -EADDRINUSE;
933 if (__unix_find_socket_byname(net, sunaddr, addr_len,
934 sk->sk_type, hash)) {
935 unix_release_addr(addr);
936 goto out_unlock;
937 }
938
939 list = &unix_socket_table[addr->hash];
940 }
941
942 err = 0;
943 __unix_remove_socket(sk);
944 u->addr = addr;
945 __unix_insert_socket(list, sk);
946
947 out_unlock:
948 spin_unlock(&unix_table_lock);
949 out_up:
950 mutex_unlock(&u->readlock);
951 out:
952 return err;
953 }
954
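/* Lock two socket states in pointer order so that concurrent
 * connects on the same pair cannot ABBA-deadlock.
 */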
955 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
956 {
957 if (unlikely(sk1 == sk2) || !sk2) {
958 unix_state_lock(sk1);
959 return;
960 }
961 if (sk1 < sk2) {
962 unix_state_lock(sk1);
963 unix_state_lock_nested(sk2);
964 } else {
965 unix_state_lock(sk2);
966 unix_state_lock_nested(sk1);
967 }
968 }
969
970 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
971 {
972 if (unlikely(sk1 == sk2) || !sk2) {
973 unix_state_unlock(sk1);
974 return;
975 }
976 unix_state_unlock(sk1);
977 unix_state_unlock(sk2);
978 }
979
980 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
981 int alen, int flags)
982 {
983 struct sock *sk = sock->sk;
984 struct net *net = sock_net(sk);
985 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
986 struct sock *other;
987 unsigned int hash;
988 int err;
989
990 if (addr->sa_family != AF_UNSPEC) {
991 err = unix_mkname(sunaddr, alen, &hash);
992 if (err < 0)
993 goto out;
994 alen = err;
995
996 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
997 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
998 goto out;
999
1000 restart:
1001 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
1002 if (!other)
1003 goto out;
1004
1005 unix_state_double_lock(sk, other);
1006
1007 /* Apparently VFS overslept socket death. Retry. */
1008 if (sock_flag(other, SOCK_DEAD)) {
1009 unix_state_double_unlock(sk, other);
1010 sock_put(other);
1011 goto restart;
1012 }
1013
1014 err = -EPERM;
1015 if (!unix_may_send(sk, other))
1016 goto out_unlock;
1017
1018 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1019 if (err)
1020 goto out_unlock;
1021
1022 } else {
1023 /*
1024 * 1003.1g breaking connected state with AF_UNSPEC
1025 */
1026 other = NULL;
1027 unix_state_double_lock(sk, other);
1028 }
1029
1030 /*
1031 * If it was connected, reconnect.
1032 */
1033 if (unix_peer(sk)) {
1034 struct sock *old_peer = unix_peer(sk);
1035 unix_peer(sk) = other;
1036 unix_state_double_unlock(sk, other);
1037
1038 if (other != old_peer)
1039 unix_dgram_disconnected(sk, old_peer);
1040 sock_put(old_peer);
1041 } else {
1042 unix_peer(sk) = other;
1043 unix_state_double_unlock(sk, other);
1044 }
1045 return 0;
1046
1047 out_unlock:
1048 unix_state_double_unlock(sk, other);
1049 sock_put(other);
1050 out:
1051 return err;
1052 }
1053
1054 static long unix_wait_for_peer(struct sock *other, long timeo)
1055 {
1056 struct unix_sock *u = unix_sk(other);
1057 int sched;
1058 DEFINE_WAIT(wait);
1059
1060 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1061
1062 sched = !sock_flag(other, SOCK_DEAD) &&
1063 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1064 unix_recvq_full(other);
1065
1066 unix_state_unlock(other);
1067
1068 if (sched)
1069 timeo = schedule_timeout(timeo);
1070
1071 finish_wait(&u->peer_wait, &wait);
1072 return timeo;
1073 }
1074
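/* Connecting works by allocating an embryonic socket (newsk) plus a
 * one-byte skb owned by it, and queueing that skb on the listener's
 * sk_receive_queue; unix_accept() later dequeues the skb and grafts
 * skb->sk onto the accepting socket.
 */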
1075 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1076 int addr_len, int flags)
1077 {
1078 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1079 struct sock *sk = sock->sk;
1080 struct net *net = sock_net(sk);
1081 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1082 struct sock *newsk = NULL;
1083 struct sock *other = NULL;
1084 struct sk_buff *skb = NULL;
1085 unsigned int hash;
1086 int st;
1087 int err;
1088 long timeo;
1089
1090 err = unix_mkname(sunaddr, addr_len, &hash);
1091 if (err < 0)
1092 goto out;
1093 addr_len = err;
1094
1095 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1096 (err = unix_autobind(sock)) != 0)
1097 goto out;
1098
1099 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1100
1101 /* First of all allocate resources.
1102 If we do it after the state is locked,
1103 we will have to recheck everything again in any case.
1104 */
1105
1106 err = -ENOMEM;
1107
1108 /* create new sock for complete connection */
1109 newsk = unix_create1(sock_net(sk), NULL, 0);
1110 if (newsk == NULL)
1111 goto out;
1112
1113 /* Allocate skb for sending to listening sock */
1114 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1115 if (skb == NULL)
1116 goto out;
1117
1118 restart:
1119 /* Find listening sock. */
1120 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1121 if (!other)
1122 goto out;
1123
1124 /* Latch state of peer */
1125 unix_state_lock(other);
1126
1127 /* Apparently VFS overslept socket death. Retry. */
1128 if (sock_flag(other, SOCK_DEAD)) {
1129 unix_state_unlock(other);
1130 sock_put(other);
1131 goto restart;
1132 }
1133
1134 err = -ECONNREFUSED;
1135 if (other->sk_state != TCP_LISTEN)
1136 goto out_unlock;
1137 if (other->sk_shutdown & RCV_SHUTDOWN)
1138 goto out_unlock;
1139
1140 if (unix_recvq_full(other)) {
1141 err = -EAGAIN;
1142 if (!timeo)
1143 goto out_unlock;
1144
1145 timeo = unix_wait_for_peer(other, timeo);
1146
1147 err = sock_intr_errno(timeo);
1148 if (signal_pending(current))
1149 goto out;
1150 sock_put(other);
1151 goto restart;
1152 }
1153
1154 /* Latch our state.
1155
1156 It is a tricky place. We need to grab our state lock and cannot
1157 drop lock on peer. It is dangerous because deadlock is
1158 possible. Connect to self case and simultaneous
1159 attempt to connect are eliminated by checking socket
1160 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1161 check this before attempt to grab lock.
1162
1163 Well, and we have to recheck the state after socket locked.
1164 */
1165 st = sk->sk_state;
1166
1167 switch (st) {
1168 case TCP_CLOSE:
1169 /* This is ok... continue with connect */
1170 break;
1171 case TCP_ESTABLISHED:
1172 /* Socket is already connected */
1173 err = -EISCONN;
1174 goto out_unlock;
1175 default:
1176 err = -EINVAL;
1177 goto out_unlock;
1178 }
1179
1180 unix_state_lock_nested(sk);
1181
1182 if (sk->sk_state != st) {
1183 unix_state_unlock(sk);
1184 unix_state_unlock(other);
1185 sock_put(other);
1186 goto restart;
1187 }
1188
1189 err = security_unix_stream_connect(sk, other, newsk);
1190 if (err) {
1191 unix_state_unlock(sk);
1192 goto out_unlock;
1193 }
1194
1195 /* The way is open! Quickly set all the necessary fields... */
1196
1197 sock_hold(sk);
1198 unix_peer(newsk) = sk;
1199 newsk->sk_state = TCP_ESTABLISHED;
1200 newsk->sk_type = sk->sk_type;
1201 init_peercred(newsk);
1202 newu = unix_sk(newsk);
1203 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1204 otheru = unix_sk(other);
1205
1206 /* copy address information from listening to new sock*/
1207 if (otheru->addr) {
1208 atomic_inc(&otheru->addr->refcnt);
1209 newu->addr = otheru->addr;
1210 }
1211 if (otheru->path.dentry) {
1212 path_get(&otheru->path);
1213 newu->path = otheru->path;
1214 }
1215
1216 /* Set credentials */
1217 copy_peercred(sk, other);
1218
1219 sock->state = SS_CONNECTED;
1220 sk->sk_state = TCP_ESTABLISHED;
1221 sock_hold(newsk);
1222
1223 smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
1224 unix_peer(sk) = newsk;
1225
1226 unix_state_unlock(sk);
1227
1228 /* take ten and send info to listening sock */
1229 spin_lock(&other->sk_receive_queue.lock);
1230 __skb_queue_tail(&other->sk_receive_queue, skb);
1231 spin_unlock(&other->sk_receive_queue.lock);
1232 unix_state_unlock(other);
1233 other->sk_data_ready(other);
1234 sock_put(other);
1235 return 0;
1236
1237 out_unlock:
1238 if (other)
1239 unix_state_unlock(other);
1240
1241 out:
1242 kfree_skb(skb);
1243 if (newsk)
1244 unix_release_sock(newsk, 0);
1245 if (other)
1246 sock_put(other);
1247 return err;
1248 }
1249
1250 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1251 {
1252 struct sock *ska = socka->sk, *skb = sockb->sk;
1253
1254 /* Join our sockets back to back */
1255 sock_hold(ska);
1256 sock_hold(skb);
1257 unix_peer(ska) = skb;
1258 unix_peer(skb) = ska;
1259 init_peercred(ska);
1260 init_peercred(skb);
1261
1262 if (ska->sk_type != SOCK_DGRAM) {
1263 ska->sk_state = TCP_ESTABLISHED;
1264 skb->sk_state = TCP_ESTABLISHED;
1265 socka->state = SS_CONNECTED;
1266 sockb->state = SS_CONNECTED;
1267 }
1268 return 0;
1269 }
1270
1271 static void unix_sock_inherit_flags(const struct socket *old,
1272 struct socket *new)
1273 {
1274 if (test_bit(SOCK_PASSCRED, &old->flags))
1275 set_bit(SOCK_PASSCRED, &new->flags);
1276 if (test_bit(SOCK_PASSSEC, &old->flags))
1277 set_bit(SOCK_PASSSEC, &new->flags);
1278 }
1279
1280 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1281 {
1282 struct sock *sk = sock->sk;
1283 struct sock *tsk;
1284 struct sk_buff *skb;
1285 int err;
1286
1287 err = -EOPNOTSUPP;
1288 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1289 goto out;
1290
1291 err = -EINVAL;
1292 if (sk->sk_state != TCP_LISTEN)
1293 goto out;
1294
1295 /* If socket state is TCP_LISTEN it cannot change (for now...),
1296 * so that no locks are necessary.
1297 */
1298
1299 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1300 if (!skb) {
1301 /* This means receive shutdown. */
1302 if (err == 0)
1303 err = -EINVAL;
1304 goto out;
1305 }
1306
1307 tsk = skb->sk;
1308 skb_free_datagram(sk, skb);
1309 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1310
1311 /* attach accepted sock to socket */
1312 unix_state_lock(tsk);
1313 newsock->state = SS_CONNECTED;
1314 unix_sock_inherit_flags(sock, newsock);
1315 sock_graft(tsk, newsock);
1316 unix_state_unlock(tsk);
1317 return 0;
1318
1319 out:
1320 return err;
1321 }
1322
1323
1324 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1325 {
1326 struct sock *sk = sock->sk;
1327 struct unix_sock *u;
1328 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1329 int err = 0;
1330
1331 if (peer) {
1332 sk = unix_peer_get(sk);
1333
1334 err = -ENOTCONN;
1335 if (!sk)
1336 goto out;
1337 err = 0;
1338 } else {
1339 sock_hold(sk);
1340 }
1341
1342 u = unix_sk(sk);
1343 unix_state_lock(sk);
1344 if (!u->addr) {
1345 sunaddr->sun_family = AF_UNIX;
1346 sunaddr->sun_path[0] = 0;
1347 *uaddr_len = sizeof(short);
1348 } else {
1349 struct unix_address *addr = u->addr;
1350
1351 *uaddr_len = addr->len;
1352 memcpy(sunaddr, addr->name, *uaddr_len);
1353 }
1354 unix_state_unlock(sk);
1355 sock_put(sk);
1356 out:
1357 return err;
1358 }
1359
1360 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1361 {
1362 int i;
1363
1364 scm->fp = UNIXCB(skb).fp;
1365 UNIXCB(skb).fp = NULL;
1366
1367 for (i = scm->fp->count-1; i >= 0; i--)
1368 unix_notinflight(scm->fp->fp[i]);
1369 }
1370
1371 static void unix_destruct_scm(struct sk_buff *skb)
1372 {
1373 struct scm_cookie scm;
1374 memset(&scm, 0, sizeof(scm));
1375 scm.pid = UNIXCB(skb).pid;
1376 if (UNIXCB(skb).fp)
1377 unix_detach_fds(&scm, skb);
1378
1379 /* Alas, it calls VFS */
1380 /* So fscking what? fput() had been SMP-safe since the last Summer */
1381 scm_destroy(&scm);
1382 sock_wfree(skb);
1383 }
1384
1385 #define MAX_RECURSION_LEVEL 4
1386
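/* Duplicate the passed file references (so the GC never sees an
 * in-flight socket as a collection candidate) and return the highest
 * recursion_level found among them, which lets the caller reject
 * SCM_RIGHTS nested deeper than MAX_RECURSION_LEVEL.
 */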
1387 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1388 {
1389 int i;
1390 unsigned char max_level = 0;
1391 int unix_sock_count = 0;
1392
1393 for (i = scm->fp->count - 1; i >= 0; i--) {
1394 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1395
1396 if (sk) {
1397 unix_sock_count++;
1398 max_level = max(max_level,
1399 unix_sk(sk)->recursion_level);
1400 }
1401 }
1402 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1403 return -ETOOMANYREFS;
1404
1405 /*
1406 * Need to duplicate file references for the sake of garbage
1407 * collection. Otherwise a socket in the fps might become a
1408 * candidate for GC while the skb is not yet queued.
1409 */
1410 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1411 if (!UNIXCB(skb).fp)
1412 return -ENOMEM;
1413
1414 if (unix_sock_count) {
1415 for (i = scm->fp->count - 1; i >= 0; i--)
1416 unix_inflight(scm->fp->fp[i]);
1417 }
1418 return max_level;
1419 }
1420
1421 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1422 {
1423 int err = 0;
1424
1425 UNIXCB(skb).pid = get_pid(scm->pid);
1426 UNIXCB(skb).uid = scm->creds.uid;
1427 UNIXCB(skb).gid = scm->creds.gid;
1428 UNIXCB(skb).fp = NULL;
1429 unix_get_secdata(scm, skb);
1430 if (scm->fp && send_fds)
1431 err = unix_attach_fds(scm, skb);
1432
1433 skb->destructor = unix_destruct_scm;
1434 return err;
1435 }
1436
1437 /*
1438 * Some apps rely on write() giving SCM_CREDENTIALS
1439 * We include credentials if source or destination socket
1440 * asserted SOCK_PASSCRED.
1441 */
1442 static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1443 const struct sock *other)
1444 {
1445 if (UNIXCB(skb).pid)
1446 return;
1447 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1448 !other->sk_socket ||
1449 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1450 UNIXCB(skb).pid = get_pid(task_tgid(current));
1451 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1452 }
1453 }
1454
1455 /*
1456 * Send AF_UNIX data.
1457 */
1458
1459 static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1460 size_t len)
1461 {
1462 struct sock *sk = sock->sk;
1463 struct net *net = sock_net(sk);
1464 struct unix_sock *u = unix_sk(sk);
1465 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1466 struct sock *other = NULL;
1467 int namelen = 0; /* fake GCC */
1468 int err;
1469 unsigned int hash;
1470 struct sk_buff *skb;
1471 long timeo;
1472 struct scm_cookie scm;
1473 int max_level;
1474 int data_len = 0;
1475
1476 wait_for_unix_gc();
1477 err = scm_send(sock, msg, &scm, false);
1478 if (err < 0)
1479 return err;
1480
1481 err = -EOPNOTSUPP;
1482 if (msg->msg_flags&MSG_OOB)
1483 goto out;
1484
1485 if (msg->msg_namelen) {
1486 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1487 if (err < 0)
1488 goto out;
1489 namelen = err;
1490 } else {
1491 sunaddr = NULL;
1492 err = -ENOTCONN;
1493 other = unix_peer_get(sk);
1494 if (!other)
1495 goto out;
1496 }
1497
1498 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1499 && (err = unix_autobind(sock)) != 0)
1500 goto out;
1501
1502 err = -EMSGSIZE;
1503 if (len > sk->sk_sndbuf - 32)
1504 goto out;
1505
1506 if (len > SKB_MAX_ALLOC) {
1507 data_len = min_t(size_t,
1508 len - SKB_MAX_ALLOC,
1509 MAX_SKB_FRAGS * PAGE_SIZE);
1510 data_len = PAGE_ALIGN(data_len);
1511
1512 BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
1513 }
1514
1515 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1516 msg->msg_flags & MSG_DONTWAIT, &err,
1517 PAGE_ALLOC_COSTLY_ORDER);
1518 if (skb == NULL)
1519 goto out;
1520
1521 err = unix_scm_to_skb(&scm, skb, true);
1522 if (err < 0)
1523 goto out_free;
1524 max_level = err + 1;
1525
1526 skb_put(skb, len - data_len);
1527 skb->data_len = data_len;
1528 skb->len = len;
1529 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1530 if (err)
1531 goto out_free;
1532
1533 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1534
1535 restart:
1536 if (!other) {
1537 err = -ECONNRESET;
1538 if (sunaddr == NULL)
1539 goto out_free;
1540
1541 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1542 hash, &err);
1543 if (other == NULL)
1544 goto out_free;
1545 }
1546
1547 if (sk_filter(other, skb) < 0) {
1548 /* Toss the packet but do not return any error to the sender */
1549 err = len;
1550 goto out_free;
1551 }
1552
1553 unix_state_lock(other);
1554 err = -EPERM;
1555 if (!unix_may_send(sk, other))
1556 goto out_unlock;
1557
1558 if (sock_flag(other, SOCK_DEAD)) {
1559 /*
1560 * Check with 1003.1g - what should
1561 * datagram error
1562 */
1563 unix_state_unlock(other);
1564 sock_put(other);
1565
1566 err = 0;
1567 unix_state_lock(sk);
1568 if (unix_peer(sk) == other) {
1569 unix_peer(sk) = NULL;
1570 unix_state_unlock(sk);
1571
1572 unix_dgram_disconnected(sk, other);
1573 sock_put(other);
1574 err = -ECONNREFUSED;
1575 } else {
1576 unix_state_unlock(sk);
1577 }
1578
1579 other = NULL;
1580 if (err)
1581 goto out_free;
1582 goto restart;
1583 }
1584
1585 err = -EPIPE;
1586 if (other->sk_shutdown & RCV_SHUTDOWN)
1587 goto out_unlock;
1588
1589 if (sk->sk_type != SOCK_SEQPACKET) {
1590 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1591 if (err)
1592 goto out_unlock;
1593 }
1594
1595 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1596 if (!timeo) {
1597 err = -EAGAIN;
1598 goto out_unlock;
1599 }
1600
1601 timeo = unix_wait_for_peer(other, timeo);
1602
1603 err = sock_intr_errno(timeo);
1604 if (signal_pending(current))
1605 goto out_free;
1606
1607 goto restart;
1608 }
1609
1610 if (sock_flag(other, SOCK_RCVTSTAMP))
1611 __net_timestamp(skb);
1612 maybe_add_creds(skb, sock, other);
1613 skb_queue_tail(&other->sk_receive_queue, skb);
1614 if (max_level > unix_sk(other)->recursion_level)
1615 unix_sk(other)->recursion_level = max_level;
1616 unix_state_unlock(other);
1617 other->sk_data_ready(other);
1618 sock_put(other);
1619 scm_destroy(&scm);
1620 return len;
1621
1622 out_unlock:
1623 unix_state_unlock(other);
1624 out_free:
1625 kfree_skb(skb);
1626 out:
1627 if (other)
1628 sock_put(other);
1629 scm_destroy(&scm);
1630 return err;
1631 }
1632
1633 /* We use paged skbs for stream sockets, and limit occupancy to 32768
1634 * bytes, and a minimum of a full page.
1635 */
1636 #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
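/* In unix_stream_sendmsg() below, each skb gets up to SKB_MAX_HEAD(0)
 * bytes of linear head; the remainder of a chunk is attached as page
 * fragments, bounded by UNIX_SKB_FRAGS_SZ.
 */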
1637
1638 static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1639 size_t len)
1640 {
1641 struct sock *sk = sock->sk;
1642 struct sock *other = NULL;
1643 int err, size;
1644 struct sk_buff *skb;
1645 int sent = 0;
1646 struct scm_cookie scm;
1647 bool fds_sent = false;
1648 int max_level;
1649 int data_len;
1650
1651 wait_for_unix_gc();
1652 err = scm_send(sock, msg, &scm, false);
1653 if (err < 0)
1654 return err;
1655
1656 err = -EOPNOTSUPP;
1657 if (msg->msg_flags&MSG_OOB)
1658 goto out_err;
1659
1660 if (msg->msg_namelen) {
1661 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1662 goto out_err;
1663 } else {
1664 err = -ENOTCONN;
1665 other = unix_peer(sk);
1666 if (!other)
1667 goto out_err;
1668 }
1669
1670 if (sk->sk_shutdown & SEND_SHUTDOWN)
1671 goto pipe_err;
1672
1673 while (sent < len) {
1674 size = len - sent;
1675
1676 /* Keep two messages in the pipe so it schedules better */
1677 size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
1678
1679 /* allow fallback to order-0 allocations */
1680 size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
1681
1682 data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
1683
1684 data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
1685
1686 skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
1687 msg->msg_flags & MSG_DONTWAIT, &err,
1688 get_order(UNIX_SKB_FRAGS_SZ));
1689 if (!skb)
1690 goto out_err;
1691
1692 /* Only send the fds in the first buffer */
1693 err = unix_scm_to_skb(&scm, skb, !fds_sent);
1694 if (err < 0) {
1695 kfree_skb(skb);
1696 goto out_err;
1697 }
1698 max_level = err + 1;
1699 fds_sent = true;
1700
1701 skb_put(skb, size - data_len);
1702 skb->data_len = data_len;
1703 skb->len = size;
1704 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
1705 if (err) {
1706 kfree_skb(skb);
1707 goto out_err;
1708 }
1709
1710 unix_state_lock(other);
1711
1712 if (sock_flag(other, SOCK_DEAD) ||
1713 (other->sk_shutdown & RCV_SHUTDOWN))
1714 goto pipe_err_free;
1715
1716 maybe_add_creds(skb, sock, other);
1717 skb_queue_tail(&other->sk_receive_queue, skb);
1718 if (max_level > unix_sk(other)->recursion_level)
1719 unix_sk(other)->recursion_level = max_level;
1720 unix_state_unlock(other);
1721 other->sk_data_ready(other);
1722 sent += size;
1723 }
1724
1725 scm_destroy(&scm);
1726
1727 return sent;
1728
1729 pipe_err_free:
1730 unix_state_unlock(other);
1731 kfree_skb(skb);
1732 pipe_err:
1733 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1734 send_sig(SIGPIPE, current, 0);
1735 err = -EPIPE;
1736 out_err:
1737 scm_destroy(&scm);
1738 return sent ? : err;
1739 }
1740
1741 static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1742 int offset, size_t size, int flags)
1743 {
1744 int err = 0;
1745 bool send_sigpipe = true;
1746 struct sock *other, *sk = socket->sk;
1747 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1748
1749 if (flags & MSG_OOB)
1750 return -EOPNOTSUPP;
1751
1752 other = unix_peer(sk);
1753 if (!other || sk->sk_state != TCP_ESTABLISHED)
1754 return -ENOTCONN;
1755
1756 if (false) {
1757 alloc_skb:
1758 unix_state_unlock(other);
1759 mutex_unlock(&unix_sk(other)->readlock);
1760 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1761 &err, 0);
1762 if (!newskb)
1763 return err;
1764 }
1765
1766 /* we must acquire readlock as we modify already present
1767 * skbs in the sk_receive_queue and mess with skb->len
1768 */
1769 err = mutex_lock_interruptible(&unix_sk(other)->readlock);
1770 if (err) {
1771 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1772 send_sigpipe = false;
1773 goto err;
1774 }
1775
1776 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1777 err = -EPIPE;
1778 goto err_unlock;
1779 }
1780
1781 unix_state_lock(other);
1782
1783 if (sock_flag(other, SOCK_DEAD) ||
1784 other->sk_shutdown & RCV_SHUTDOWN) {
1785 err = -EPIPE;
1786 goto err_state_unlock;
1787 }
1788
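/* Pick the skb to append the page to: reuse newskb if the tail we
 * previously failed on is still the tail, start the queue with newskb
 * if it is empty, otherwise coalesce onto the existing tail and drop
 * the now-unneeded newskb, clearing the pointer so a consumed skb is
 * never queued below (which appears to be the case the patch title
 * refers to).
 */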
1789 skb = skb_peek_tail(&other->sk_receive_queue);
1790 if (tail && tail == skb) {
1791 skb = newskb;
1792 } else if (!skb) {
1793 if (newskb)
1794 skb = newskb;
1795 else
1796 goto alloc_skb;
1797 } else if (newskb) {
1798 /* this is the fast path; we don't necessarily need to
1799 * call kfree_skb, and even with newskb == NULL
1800 * this does no harm
1801 */
1802 consume_skb(newskb);
1803 newskb = NULL;
1804 }
1805
1806 if (skb_append_pagefrags(skb, page, offset, size)) {
1807 tail = skb;
1808 goto alloc_skb;
1809 }
1810
1811 skb->len += size;
1812 skb->data_len += size;
1813 skb->truesize += size;
1814 atomic_add(size, &sk->sk_wmem_alloc);
1815
1816 if (newskb)
1817 __skb_queue_tail(&other->sk_receive_queue, newskb);
1818
1819 unix_state_unlock(other);
1820 mutex_unlock(&unix_sk(other)->readlock);
1821
1822 other->sk_data_ready(other);
1823
1824 return size;
1825
1826 err_state_unlock:
1827 unix_state_unlock(other);
1828 err_unlock:
1829 mutex_unlock(&unix_sk(other)->readlock);
1830 err:
1831 kfree_skb(newskb);
1832 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1833 send_sig(SIGPIPE, current, 0);
1834 return err;
1835 }
1836
1837 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
1838 size_t len)
1839 {
1840 int err;
1841 struct sock *sk = sock->sk;
1842
1843 err = sock_error(sk);
1844 if (err)
1845 return err;
1846
1847 if (sk->sk_state != TCP_ESTABLISHED)
1848 return -ENOTCONN;
1849
1850 if (msg->msg_namelen)
1851 msg->msg_namelen = 0;
1852
1853 return unix_dgram_sendmsg(sock, msg, len);
1854 }
1855
1856 static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
1857 size_t size, int flags)
1858 {
1859 struct sock *sk = sock->sk;
1860
1861 if (sk->sk_state != TCP_ESTABLISHED)
1862 return -ENOTCONN;
1863
1864 return unix_dgram_recvmsg(sock, msg, size, flags);
1865 }
1866
1867 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1868 {
1869 struct unix_sock *u = unix_sk(sk);
1870
1871 if (u->addr) {
1872 msg->msg_namelen = u->addr->len;
1873 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1874 }
1875 }
1876
1877 static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1878 size_t size, int flags)
1879 {
1880 struct scm_cookie scm;
1881 struct sock *sk = sock->sk;
1882 struct unix_sock *u = unix_sk(sk);
1883 int noblock = flags & MSG_DONTWAIT;
1884 struct sk_buff *skb;
1885 int err;
1886 int peeked, skip;
1887
1888 err = -EOPNOTSUPP;
1889 if (flags&MSG_OOB)
1890 goto out;
1891
1892 err = mutex_lock_interruptible(&u->readlock);
1893 if (unlikely(err)) {
1894 /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
1895 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1896 */
1897 err = noblock ? -EAGAIN : -ERESTARTSYS;
1898 goto out;
1899 }
1900
1901 skip = sk_peek_offset(sk, flags);
1902
1903 skb = __skb_recv_datagram(sk, flags, &peeked, &skip, &err);
1904 if (!skb) {
1905 unix_state_lock(sk);
1906 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1907 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1908 (sk->sk_shutdown & RCV_SHUTDOWN))
1909 err = 0;
1910 unix_state_unlock(sk);
1911 goto out_unlock;
1912 }
1913
1914 wake_up_interruptible_sync_poll(&u->peer_wait,
1915 POLLOUT | POLLWRNORM | POLLWRBAND);
1916
1917 if (msg->msg_name)
1918 unix_copy_addr(msg, skb->sk);
1919
1920 if (size > skb->len - skip)
1921 size = skb->len - skip;
1922 else if (size < skb->len - skip)
1923 msg->msg_flags |= MSG_TRUNC;
1924
1925 err = skb_copy_datagram_msg(skb, skip, msg, size);
1926 if (err)
1927 goto out_free;
1928
1929 if (sock_flag(sk, SOCK_RCVTSTAMP))
1930 __sock_recv_timestamp(msg, sk, skb);
1931
1932 memset(&scm, 0, sizeof(scm));
1933
1934 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
1935 unix_set_secdata(&scm, skb);
1936
1937 if (!(flags & MSG_PEEK)) {
1938 if (UNIXCB(skb).fp)
1939 unix_detach_fds(&scm, skb);
1940
1941 sk_peek_offset_bwd(sk, skb->len);
1942 } else {
1943 /* It is questionable: on PEEK we could:
1944 - do not return fds - good, but too simple 8)
1945 - return fds, and do not return them on read (old strategy,
1946 apparently wrong)
1947 - clone fds (I chose it for now, it is the most universal
1948 solution)
1949
1950 POSIX 1003.1g does not actually define this clearly
1951 at all. POSIX 1003.1g doesn't define a lot of things
1952 clearly however!
1953
1954 */
1955
1956 sk_peek_offset_fwd(sk, size);
1957
1958 if (UNIXCB(skb).fp)
1959 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
1960 }
1961 err = (flags & MSG_TRUNC) ? skb->len - skip : size;
1962
1963 scm_recv(sock, msg, &scm, flags);
1964
1965 out_free:
1966 skb_free_datagram(sk, skb);
1967 out_unlock:
1968 mutex_unlock(&u->readlock);
1969 out:
1970 return err;
1971 }
1972
1973 /*
1974 * Sleep until more data has arrived. But check for races..
1975 */
1976 static long unix_stream_data_wait(struct sock *sk, long timeo,
1977 struct sk_buff *last, unsigned int last_len)
1978 {
1979 struct sk_buff *tail;
1980 DEFINE_WAIT(wait);
1981
1982 unix_state_lock(sk);
1983
1984 for (;;) {
1985 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1986
1987 tail = skb_peek_tail(&sk->sk_receive_queue);
1988 if (tail != last ||
1989 (tail && tail->len != last_len) ||
1990 sk->sk_err ||
1991 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1992 signal_pending(current) ||
1993 !timeo)
1994 break;
1995
1996 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1997 unix_state_unlock(sk);
1998 timeo = freezable_schedule_timeout(timeo);
1999 unix_state_lock(sk);
2000
2001 if (sock_flag(sk, SOCK_DEAD))
2002 break;
2003
2004 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2005 }
2006
2007 finish_wait(sk_sleep(sk), &wait);
2008 unix_state_unlock(sk);
2009 return timeo;
2010 }
2011
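/* UNIXCB(skb).consumed counts the bytes readers have already taken
 * from a stream skb, so unix_skb_len() is the unread remainder.
 */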
2012 static unsigned int unix_skb_len(const struct sk_buff *skb)
2013 {
2014 return skb->len - UNIXCB(skb).consumed;
2015 }
2016
2017 struct unix_stream_read_state {
2018 int (*recv_actor)(struct sk_buff *, int, int,
2019 struct unix_stream_read_state *);
2020 struct socket *socket;
2021 struct msghdr *msg;
2022 struct pipe_inode_info *pipe;
2023 size_t size;
2024 int flags;
2025 unsigned int splice_flags;
2026 };
2027
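/* Common receive loop shared by recvmsg() and splice(): recv_actor
 * copies each chunk either into state->msg or into state->pipe, while
 * this function handles blocking, MSG_PEEK offsets, credential
 * matching and SCM_RIGHTS handling.
 */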
2028 static int unix_stream_read_generic(struct unix_stream_read_state *state)
2029 {
2030 struct scm_cookie scm;
2031 struct socket *sock = state->socket;
2032 struct sock *sk = sock->sk;
2033 struct unix_sock *u = unix_sk(sk);
2034 int copied = 0;
2035 int flags = state->flags;
2036 int noblock = flags & MSG_DONTWAIT;
2037 bool check_creds = false;
2038 int target;
2039 int err = 0;
2040 long timeo;
2041 int skip;
2042 size_t size = state->size;
2043 unsigned int last_len;
2044
2045 err = -EINVAL;
2046 if (sk->sk_state != TCP_ESTABLISHED)
2047 goto out;
2048
2049 err = -EOPNOTSUPP;
2050 if (flags & MSG_OOB)
2051 goto out;
2052
2053 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2054 timeo = sock_rcvtimeo(sk, noblock);
2055
2056 memset(&scm, 0, sizeof(scm));
2057
2058 /* Lock the socket to prevent queue disordering
2059 * while we sleep in memcpy_to_msg()
2060 */
2061 err = mutex_lock_interruptible(&u->readlock);
2062 if (unlikely(err)) {
2063 /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
2064 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2065 */
2066 err = noblock ? -EAGAIN : -ERESTARTSYS;
2067 goto out;
2068 }
2069
2070 if (flags & MSG_PEEK)
2071 skip = sk_peek_offset(sk, flags);
2072 else
2073 skip = 0;
2074
2075 do {
2076 int chunk;
2077 bool drop_skb;
2078 struct sk_buff *skb, *last;
2079
2080 unix_state_lock(sk);
2081 if (sock_flag(sk, SOCK_DEAD)) {
2082 err = -ECONNRESET;
2083 goto unlock;
2084 }
2085 last = skb = skb_peek(&sk->sk_receive_queue);
2086 last_len = last ? last->len : 0;
2087 again:
2088 if (skb == NULL) {
2089 unix_sk(sk)->recursion_level = 0;
2090 if (copied >= target)
2091 goto unlock;
2092
2093 /*
2094 * POSIX 1003.1g mandates this order.
2095 */
2096
2097 err = sock_error(sk);
2098 if (err)
2099 goto unlock;
2100 if (sk->sk_shutdown & RCV_SHUTDOWN)
2101 goto unlock;
2102
2103 unix_state_unlock(sk);
2104 err = -EAGAIN;
2105 if (!timeo)
2106 break;
2107 mutex_unlock(&u->readlock);
2108
2109 timeo = unix_stream_data_wait(sk, timeo, last,
2110 last_len);
2111
2112 if (signal_pending(current) ||
2113 mutex_lock_interruptible(&u->readlock)) {
2114 err = sock_intr_errno(timeo);
2115 goto out;
2116 }
2117
2118 continue;
2119 unlock:
2120 unix_state_unlock(sk);
2121 break;
2122 }
2123
2124 while (skip >= unix_skb_len(skb)) {
2125 skip -= unix_skb_len(skb);
2126 last = skb;
2127 last_len = skb->len;
2128 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2129 if (!skb)
2130 goto again;
2131 }
2132
2133 unix_state_unlock(sk);
2134
2135 if (check_creds) {
2136 /* Never glue messages from different writers */
2137 if ((UNIXCB(skb).pid != scm.pid) ||
2138 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2139 !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
2140 !unix_secdata_eq(&scm, skb))
2141 break;
2142 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2143 /* Copy credentials */
2144 scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
2145 unix_set_secdata(&scm, skb);
2146 check_creds = true;
2147 }
2148
2149 /* Copy address just once */
2150 if (state->msg && state->msg->msg_name) {
2151 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
2152 state->msg->msg_name);
2153 unix_copy_addr(state->msg, skb->sk);
2154 sunaddr = NULL;
2155 }
2156
2157 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2158 skb_get(skb);
2159 chunk = state->recv_actor(skb, skip, chunk, state);
2160 drop_skb = !unix_skb_len(skb);
2161 /* skb is only safe to use if !drop_skb */
2162 consume_skb(skb);
2163 if (chunk < 0) {
2164 if (copied == 0)
2165 copied = -EFAULT;
2166 break;
2167 }
2168 copied += chunk;
2169 size -= chunk;
2170
2171 if (drop_skb) {
2172 /* The skb was fully consumed by a concurrent
2173 * reader, so it is no longer valid and we can
2174 * be sure it has already been unlinked from
2175 * the socket queue.
2176 *
2177 * Report a short read instead.
2178 */
2179 err = 0;
2180 break;
2181 }
2182
2183 /* Mark read part of skb as used */
2184 if (!(flags & MSG_PEEK)) {
2185 UNIXCB(skb).consumed += chunk;
2186
2187 sk_peek_offset_bwd(sk, chunk);
2188
2189 if (UNIXCB(skb).fp)
2190 unix_detach_fds(&scm, skb);
2191
2192 if (unix_skb_len(skb))
2193 break;
2194
2195 skb_unlink(skb, &sk->sk_receive_queue);
2196 consume_skb(skb);
2197
2198 if (scm.fp)
2199 break;
2200 } else {
2201 /* Duplicating fds on MSG_PEEK is questionable;
2202 * see the note in unix_dgram_recvmsg(). */
2203 if (UNIXCB(skb).fp)
2204 scm.fp = scm_fp_dup(UNIXCB(skb).fp);
2205
2206 sk_peek_offset_fwd(sk, chunk);
2207
2208 if (UNIXCB(skb).fp)
2209 break;
2210
2211 skip = 0;
2212 last = skb;
2213 last_len = skb->len;
2214 unix_state_lock(sk);
2215 skb = skb_peek_next(skb, &sk->sk_receive_queue);
2216 if (skb)
2217 goto again;
2218 unix_state_unlock(sk);
2219 break;
2220 }
2221 } while (size);
2222
2223 mutex_unlock(&u->readlock);
2224 if (state->msg)
2225 scm_recv(sock, state->msg, &scm, flags);
2226 else
2227 scm_destroy(&scm);
2228 out:
2229 return copied ? : err;
2230 }
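
/* Editor's note: illustrative userspace sketch, not part of this file.
 * The skip/sk_peek_offset() handling above backs SO_PEEK_OFF: with a
 * non-negative peek offset, successive MSG_PEEK reads walk forward
 * through the queue instead of re-reading the same bytes.
 */
#if 0	/* example only */
#include <assert.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2], off = 0;
	char a[2], b[2];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	setsockopt(sv[0], SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	send(sv[1], "abcd", 4, 0);

	recv(sv[0], a, 2, MSG_PEEK);	/* "ab"; peek offset -> 2 */
	recv(sv[0], b, 2, MSG_PEEK);	/* "cd", not "ab" again   */
	assert(b[0] == 'c');
	recv(sv[0], a, 2, 0);		/* real read; the offset is
					 * pulled back by sk_peek_offset_bwd() */
	return 0;
}
#endif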
2231
2232 static int unix_stream_read_actor(struct sk_buff *skb,
2233 int skip, int chunk,
2234 struct unix_stream_read_state *state)
2235 {
2236 int ret;
2237
2238 ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
2239 state->msg, chunk);
2240 return ret ?: chunk;
2241 }
2242
2243 static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2244 size_t size, int flags)
2245 {
2246 struct unix_stream_read_state state = {
2247 .recv_actor = unix_stream_read_actor,
2248 .socket = sock,
2249 .msg = msg,
2250 .size = size,
2251 .flags = flags
2252 };
2253
2254 return unix_stream_read_generic(&state);
2255 }
2256
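/* Editor's note: splice_to_pipe() can sleep on the pipe's mutex, so the
 * socket's readlock is dropped across the call; holding it would risk a
 * lock inversion against a task that holds the pipe mutex while trying
 * to read from this socket.
 */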
2257 static ssize_t skb_unix_socket_splice(struct sock *sk,
2258 struct pipe_inode_info *pipe,
2259 struct splice_pipe_desc *spd)
2260 {
2261 int ret;
2262 struct unix_sock *u = unix_sk(sk);
2263
2264 mutex_unlock(&u->readlock);
2265 ret = splice_to_pipe(pipe, spd);
2266 mutex_lock(&u->readlock);
2267
2268 return ret;
2269 }
2270
2271 static int unix_stream_splice_actor(struct sk_buff *skb,
2272 int skip, int chunk,
2273 struct unix_stream_read_state *state)
2274 {
2275 return skb_splice_bits(skb, state->socket->sk,
2276 UNIXCB(skb).consumed + skip,
2277 state->pipe, chunk, state->splice_flags,
2278 skb_unix_socket_splice);
2279 }
2280
2281 static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2282 struct pipe_inode_info *pipe,
2283 size_t size, unsigned int flags)
2284 {
2285 struct unix_stream_read_state state = {
2286 .recv_actor = unix_stream_splice_actor,
2287 .socket = sock,
2288 .pipe = pipe,
2289 .size = size,
2290 .splice_flags = flags,
2291 };
2292
2293 if (unlikely(*ppos))
2294 return -ESPIPE;
2295
2296 if (sock->file->f_flags & O_NONBLOCK ||
2297 flags & SPLICE_F_NONBLOCK)
2298 state.flags = MSG_DONTWAIT;
2299
2300 return unix_stream_read_generic(&state);
2301 }
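
/* Editor's note: illustrative userspace sketch, not part of this file.
 * unix_stream_splice_read() is reached via splice(2) with a unix stream
 * socket as the input fd:
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2], p[2];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	pipe(p);
	send(sv[1], "hello", 5, 0);
	/* move up to 5 bytes from the socket into the pipe without
	 * bouncing through a userspace buffer */
	splice(sv[0], NULL, p[1], NULL, 5, 0);
	return 0;
}
#endif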
2302
2303 static int unix_shutdown(struct socket *sock, int mode)
2304 {
2305 struct sock *sk = sock->sk;
2306 struct sock *other;
2307
2308 if (mode < SHUT_RD || mode > SHUT_RDWR)
2309 return -EINVAL;
2310 /* This maps:
2311 * SHUT_RD (0) -> RCV_SHUTDOWN (1)
2312 * SHUT_WR (1) -> SEND_SHUTDOWN (2)
2313 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
2314 */
2315 ++mode;
2316
2317 unix_state_lock(sk);
2318 sk->sk_shutdown |= mode;
2319 other = unix_peer(sk);
2320 if (other)
2321 sock_hold(other);
2322 unix_state_unlock(sk);
2323 sk->sk_state_change(sk);
2324
2325 if (other &&
2326 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2327
2328 int peer_mode = 0;
2329
2330 if (mode&RCV_SHUTDOWN)
2331 peer_mode |= SEND_SHUTDOWN;
2332 if (mode&SEND_SHUTDOWN)
2333 peer_mode |= RCV_SHUTDOWN;
2334 unix_state_lock(other);
2335 other->sk_shutdown |= peer_mode;
2336 unix_state_unlock(other);
2337 other->sk_state_change(other);
2338 if (peer_mode == SHUTDOWN_MASK)
2339 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2340 else if (peer_mode & RCV_SHUTDOWN)
2341 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2342 }
2343 if (other)
2344 sock_put(other);
2345
2346 return 0;
2347 }
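
/* Editor's note: illustrative userspace sketch, not part of this file.
 * The cross-mapping above (our SEND_SHUTDOWN becomes the peer's
 * RCV_SHUTDOWN) is why the peer's recv() returns 0 (EOF) right after we
 * shutdown(fd, SHUT_WR):
 */
#if 0	/* example only */
#include <assert.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];
	char c;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	shutdown(sv[0], SHUT_WR);		/* peer gets RCV_SHUTDOWN */
	assert(recv(sv[1], &c, 1, 0) == 0);	/* immediate EOF */
	return 0;
}
#endif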
2348
2349 long unix_inq_len(struct sock *sk)
2350 {
2351 struct sk_buff *skb;
2352 long amount = 0;
2353
2354 if (sk->sk_state == TCP_LISTEN)
2355 return -EINVAL;
2356
2357 spin_lock(&sk->sk_receive_queue.lock);
2358 if (sk->sk_type == SOCK_STREAM ||
2359 sk->sk_type == SOCK_SEQPACKET) {
2360 skb_queue_walk(&sk->sk_receive_queue, skb)
2361 amount += unix_skb_len(skb);
2362 } else {
2363 skb = skb_peek(&sk->sk_receive_queue);
2364 if (skb)
2365 amount = skb->len;
2366 }
2367 spin_unlock(&sk->sk_receive_queue.lock);
2368
2369 return amount;
2370 }
2371 EXPORT_SYMBOL_GPL(unix_inq_len);
2372
2373 long unix_outq_len(struct sock *sk)
2374 {
2375 return sk_wmem_alloc_get(sk);
2376 }
2377 EXPORT_SYMBOL_GPL(unix_outq_len);
2378
2379 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2380 {
2381 struct sock *sk = sock->sk;
2382 long amount = 0;
2383 int err;
2384
2385 switch (cmd) {
2386 case SIOCOUTQ:
2387 amount = unix_outq_len(sk);
2388 err = put_user(amount, (int __user *)arg);
2389 break;
2390 case SIOCINQ:
2391 amount = unix_inq_len(sk);
2392 if (amount < 0)
2393 err = amount;
2394 else
2395 err = put_user(amount, (int __user *)arg);
2396 break;
2397 default:
2398 err = -ENOIOCTLCMD;
2399 break;
2400 }
2401 return err;
2402 }
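
/* Editor's note: illustrative userspace sketch, not part of this file.
 * SIOCINQ as implemented above: for a stream socket it sums only the
 * unconsumed part of each queued skb (unix_skb_len()), so a partially
 * read message is reported correctly.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

int main(void)
{
	int sv[2], inq = 0;
	char c;

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	send(sv[1], "abc", 3, 0);
	recv(sv[0], &c, 1, 0);		/* consume one byte */
	ioctl(sv[0], SIOCINQ, &inq);
	printf("bytes still queued: %d\n", inq);	/* prints 2 */
	return 0;
}
#endif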
2403
2404 static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2405 {
2406 struct sock *sk = sock->sk;
2407 unsigned int mask;
2408
2409 sock_poll_wait(file, sk_sleep(sk), wait);
2410 mask = 0;
2411
2412 /* exceptional events? */
2413 if (sk->sk_err)
2414 mask |= POLLERR;
2415 if (sk->sk_shutdown == SHUTDOWN_MASK)
2416 mask |= POLLHUP;
2417 if (sk->sk_shutdown & RCV_SHUTDOWN)
2418 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2419
2420 /* readable? */
2421 if (!skb_queue_empty(&sk->sk_receive_queue))
2422 mask |= POLLIN | POLLRDNORM;
2423
2424 /* Connection-based sockets need to check for termination and startup */
2425 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2426 sk->sk_state == TCP_CLOSE)
2427 mask |= POLLHUP;
2428
2429 /*
2430 * We set writable also when the other side has shut down the
2431 * connection. This prevents sockets from getting stuck.
2432 */
2433 if (unix_writable(sk))
2434 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2435
2436 return mask;
2437 }
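
/* Editor's note: illustrative userspace sketch, not part of this file.
 * When the peer closes, this socket's sk_shutdown becomes SHUTDOWN_MASK,
 * so by the checks above poll() reports POLLHUP together with
 * POLLRDHUP/POLLIN:
 */
#if 0	/* example only */
#define _GNU_SOURCE		/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];
	struct pollfd pfd = { .events = POLLIN | POLLRDHUP };

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	close(sv[1]);			/* peer fully shut down */
	pfd.fd = sv[0];
	poll(&pfd, 1, 0);
	printf("revents=%#x\n", pfd.revents);	/* POLLHUP|POLLRDHUP|POLLIN */
	return 0;
}
#endif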
2438
2439 static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2440 poll_table *wait)
2441 {
2442 struct sock *sk = sock->sk, *other;
2443 unsigned int mask, writable;
2444
2445 sock_poll_wait(file, sk_sleep(sk), wait);
2446 mask = 0;
2447
2448 /* exceptional events? */
2449 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2450 mask |= POLLERR |
2451 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2452
2453 if (sk->sk_shutdown & RCV_SHUTDOWN)
2454 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2455 if (sk->sk_shutdown == SHUTDOWN_MASK)
2456 mask |= POLLHUP;
2457
2458 /* readable? */
2459 if (!skb_queue_empty(&sk->sk_receive_queue))
2460 mask |= POLLIN | POLLRDNORM;
2461
2462 /* Connection-based sockets need to check for termination and startup */
2463 if (sk->sk_type == SOCK_SEQPACKET) {
2464 if (sk->sk_state == TCP_CLOSE)
2465 mask |= POLLHUP;
2466 /* connection hasn't started yet? */
2467 if (sk->sk_state == TCP_SYN_SENT)
2468 return mask;
2469 }
2470
2471 /* No write status requested, avoid expensive OUT tests. */
2472 if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
2473 return mask;
2474
2475 writable = unix_writable(sk);
2476 other = unix_peer_get(sk);
2477 if (other) {
2478 if (unix_peer(other) != sk) {
2479 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2480 if (unix_recvq_full(other))
2481 writable = 0;
2482 }
2483 sock_put(other);
2484 }
2485
2486 if (writable)
2487 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2488 else
2489 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2490
2491 return mask;
2492 }
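
/* Editor's note: the unix_peer(other) != sk case above covers
 * non-reciprocal datagram connections (many senders connected to one
 * receiver, /dev/log style): writability then also depends on the
 * receiver's queue, so the poller is parked on the peer's peer_wait
 * queue and woken when the receiver drains it.
 */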
2493
2494 #ifdef CONFIG_PROC_FS
2495
2496 #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
2497
2498 #define get_bucket(x) ((x) >> BUCKET_SPACE)
2499 #define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
2500 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
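
/* Editor's note: the seq_file position packs a hash bucket into the high
 * bits of *pos and a 1-based offset within that bucket into the low
 * BUCKET_SPACE bits; e.g. set_bucket_offset(3, 1) means "first socket of
 * bucket 3", and get_bucket()/get_offset() undo the packing.
 */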
2501
2502 static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
2503 {
2504 unsigned long offset = get_offset(*pos);
2505 unsigned long bucket = get_bucket(*pos);
2506 struct sock *sk;
2507 unsigned long count = 0;
2508
2509 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
2510 if (sock_net(sk) != seq_file_net(seq))
2511 continue;
2512 if (++count == offset)
2513 break;
2514 }
2515
2516 return sk;
2517 }
2518
2519 static struct sock *unix_next_socket(struct seq_file *seq,
2520 struct sock *sk,
2521 loff_t *pos)
2522 {
2523 unsigned long bucket;
2524
2525 while (sk > (struct sock *)SEQ_START_TOKEN) {
2526 sk = sk_next(sk);
2527 if (!sk)
2528 goto next_bucket;
2529 if (sock_net(sk) == seq_file_net(seq))
2530 return sk;
2531 }
2532
2533 do {
2534 sk = unix_from_bucket(seq, pos);
2535 if (sk)
2536 return sk;
2537
2538 next_bucket:
2539 bucket = get_bucket(*pos) + 1;
2540 *pos = set_bucket_offset(bucket, 1);
2541 } while (bucket < ARRAY_SIZE(unix_socket_table));
2542
2543 return NULL;
2544 }
2545
2546 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2547 __acquires(unix_table_lock)
2548 {
2549 spin_lock(&unix_table_lock);
2550
2551 if (!*pos)
2552 return SEQ_START_TOKEN;
2553
2554 if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
2555 return NULL;
2556
2557 return unix_next_socket(seq, NULL, pos);
2558 }
2559
2560 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2561 {
2562 ++*pos;
2563 return unix_next_socket(seq, v, pos);
2564 }
2565
2566 static void unix_seq_stop(struct seq_file *seq, void *v)
2567 __releases(unix_table_lock)
2568 {
2569 spin_unlock(&unix_table_lock);
2570 }
2571
2572 static int unix_seq_show(struct seq_file *seq, void *v)
2573 {
2575 if (v == SEQ_START_TOKEN)
2576 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2577 "Inode Path\n");
2578 else {
2579 struct sock *s = v;
2580 struct unix_sock *u = unix_sk(s);
2581 unix_state_lock(s);
2582
2583 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2584 s,
2585 atomic_read(&s->sk_refcnt),
2586 0,
2587 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2588 s->sk_type,
2589 s->sk_socket ?
2590 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2591 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2592 sock_i_ino(s));
2593
2594 if (u->addr) {
2595 int i, len;
2596 seq_putc(seq, ' ');
2597
2598 i = 0;
2599 len = u->addr->len - sizeof(short);
2600 if (!UNIX_ABSTRACT(s))
2601 len--;
2602 else {
2603 seq_putc(seq, '@');
2604 i++;
2605 }
2606 for ( ; i < len; i++)
2607 seq_putc(seq, u->addr->name->sun_path[i]);
2608 }
2609 unix_state_unlock(s);
2610 seq_putc(seq, '\n');
2611 }
2612
2613 return 0;
2614 }
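
/* Editor's note: illustrative userspace sketch, not part of this file.
 * The lines produced above are what userspace reads from
 * /proc/net/unix; abstract names show up with a leading '@':
 */
#if 0	/* example only */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/unix", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif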
2615
2616 static const struct seq_operations unix_seq_ops = {
2617 .start = unix_seq_start,
2618 .next = unix_seq_next,
2619 .stop = unix_seq_stop,
2620 .show = unix_seq_show,
2621 };
2622
2623 static int unix_seq_open(struct inode *inode, struct file *file)
2624 {
2625 return seq_open_net(inode, file, &unix_seq_ops,
2626 sizeof(struct seq_net_private));
2627 }
2628
2629 static const struct file_operations unix_seq_fops = {
2630 .owner = THIS_MODULE,
2631 .open = unix_seq_open,
2632 .read = seq_read,
2633 .llseek = seq_lseek,
2634 .release = seq_release_net,
2635 };
2636
2637 #endif
2638
2639 static const struct net_proto_family unix_family_ops = {
2640 .family = PF_UNIX,
2641 .create = unix_create,
2642 .owner = THIS_MODULE,
2643 };
2644
2645
2646 static int __net_init unix_net_init(struct net *net)
2647 {
2648 int error = -ENOMEM;
2649
2650 net->unx.sysctl_max_dgram_qlen = 10;
2651 if (unix_sysctl_register(net))
2652 goto out;
2653
2654 #ifdef CONFIG_PROC_FS
2655 if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
2656 unix_sysctl_unregister(net);
2657 goto out;
2658 }
2659 #endif
2660 error = 0;
2661 out:
2662 return error;
2663 }
2664
2665 static void __net_exit unix_net_exit(struct net *net)
2666 {
2667 unix_sysctl_unregister(net);
2668 remove_proc_entry("unix", net->proc_net);
2669 }
2670
2671 static struct pernet_operations unix_net_ops = {
2672 .init = unix_net_init,
2673 .exit = unix_net_exit,
2674 };
2675
2676 static int __init af_unix_init(void)
2677 {
2678 int rc = -1;
2679
2680 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
2681
2682 rc = proto_register(&unix_proto, 1);
2683 if (rc != 0) {
2684 pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
2685 goto out;
2686 }
2687
2688 sock_register(&unix_family_ops);
2689 register_pernet_subsys(&unix_net_ops);
2690 out:
2691 return rc;
2692 }
2693
2694 static void __exit af_unix_exit(void)
2695 {
2696 sock_unregister(PF_UNIX);
2697 proto_unregister(&unix_proto);
2698 unregister_pernet_subsys(&unix_net_ops);
2699 }
2700
2701 /* Earlier than device_initcall() so that other drivers invoking
2702 request_module() don't end up in a loop when modprobe tries
2703 to use a UNIX socket. But later than subsys_initcall() because
2704 we depend on stuff initialised there. */
2705 fs_initcall(af_unix_init);
2706 module_exit(af_unix_exit);
2707
2708 MODULE_LICENSE("GPL");
2709 MODULE_ALIAS_NETPROTO(PF_UNIX);