1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "port.h"
39
40 #include <linux/export.h>
41
42 #define SS_LISTENING -1 /* socket is listening */
43 #define SS_READY -2 /* socket is connectionless */
44
45 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
46
47 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
48 static void tipc_data_ready(struct sock *sk, int len);
49 static void tipc_write_space(struct sock *sk);
50 static int tipc_release(struct socket *sock);
51 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
52
53 static const struct proto_ops packet_ops;
54 static const struct proto_ops stream_ops;
55 static const struct proto_ops msg_ops;
56
57 static struct proto tipc_proto;
58 static struct proto tipc_proto_kern;
59
60 /*
61 * Revised TIPC socket locking policy:
62 *
63 * Most socket operations take the standard socket lock when they start
64 * and hold it until they finish (or until they need to sleep). Acquiring
65 * this lock grants the owner exclusive access to the fields of the socket
66 * data structures, with the exception of the backlog queue. A few socket
67 * operations can be done without taking the socket lock because they only
68 * read socket information that never changes during the life of the socket.
69 *
70 * Socket operations may acquire the lock for the associated TIPC port if they
71 * need to perform an operation on the port. If any routine needs to acquire
72 * both the socket lock and the port lock it must take the socket lock first
73 * to avoid the risk of deadlock.
74 *
75 * The dispatcher handling incoming messages cannot grab the socket lock in
76 * the standard fashion, since it is invoked at the BH level and cannot block.
77 * Instead, it checks to see if the socket lock is currently owned by someone,
78 * and either handles the message itself or adds it to the socket's backlog
79 * queue; in the latter case the queued message is processed once the process
80 * owning the socket lock releases it.
81 *
82 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
83 * the problem of a blocked socket operation preventing any other operations
84 * from occurring. However, applications must be careful if they have
85 * multiple threads trying to send (or receive) on the same socket, as these
86 * operations might interfere with each other. For example, doing a connect
87 * and a receive at the same time might allow the receive to consume the
88 * ACK message meant for the connect. While additional work could be done
89 * to try to overcome this, it doesn't seem worthwhile at present.
90 *
91 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
92 * that another operation that must be performed in a non-blocking manner is
93 * not delayed for very long because the lock has already been taken.
94 *
95 * NOTE: This code assumes that certain fields of a port/socket pair are
96 * constant over its lifetime; such fields can be examined without taking
97 * the socket lock and/or port lock, and do not need to be re-read even
98 * after resuming processing after waiting. These fields include:
99 * - socket type
100 * - pointer to socket sk structure (aka tipc_sock structure)
101 * - pointer to port structure
102 * - port reference
103 */
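/*
 * Illustrative sketch of the locking rules described above: the socket lock
 * is always taken before the port lock, and the BH-level dispatcher defers
 * to the backlog queue when it cannot take the socket lock.
 * tipc_port_lock()/tipc_port_unlock() are assumed to be the port-level
 * helpers declared in port.h.
 *
 *	lock_sock(sk);                         // process context: socket lock first
 *	p_ptr = tipc_port_lock(port_ref);      // port lock second, never the reverse
 *	... update socket and port state ...
 *	tipc_port_unlock(p_ptr);
 *	release_sock(sk);                      // also drains the backlog via backlog_rcv()
 *
 *	bh_lock_sock(sk);                      // BH context (see tipc_sk_rcv() below)
 *	if (!sock_owned_by_user(sk))
 *		filter_rcv(sk, buf);           // socket lock free: handle directly
 *	else
 *		sk_add_backlog(sk, buf, limit); // otherwise queue for the lock owner
 *	bh_unlock_sock(sk);
 */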
104
105 #include "socket.h"
106
107 /**
108 * advance_rx_queue - discard first buffer in socket receive queue
109 *
110 * Caller must hold socket lock
111 */
112 static void advance_rx_queue(struct sock *sk)
113 {
114 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
115 }
116
117 /**
118 * reject_rx_queue - reject all buffers in socket receive queue
119 *
120 * Caller must hold socket lock
121 */
122 static void reject_rx_queue(struct sock *sk)
123 {
124 struct sk_buff *buf;
125
126 while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
127 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
128 }
129
130 /**
131 * tipc_sk_create - create a TIPC socket
132 * @net: network namespace (must be default network)
133 * @sock: pre-allocated socket structure
134 * @protocol: protocol indicator (must be 0)
135 * @kern: caused by kernel or by userspace?
136 *
137 * This routine creates additional data structures used by the TIPC socket,
138 * initializes them, and links them together.
139 *
140 * Returns 0 on success, errno otherwise
141 */
142 static int tipc_sk_create(struct net *net, struct socket *sock,
143 int protocol, int kern)
144 {
145 const struct proto_ops *ops;
146 socket_state state;
147 struct sock *sk;
148 struct tipc_sock *tsk;
149 struct tipc_port *port;
150 u32 ref;
151
152 /* Validate arguments */
153 if (unlikely(protocol != 0))
154 return -EPROTONOSUPPORT;
155
156 switch (sock->type) {
157 case SOCK_STREAM:
158 ops = &stream_ops;
159 state = SS_UNCONNECTED;
160 break;
161 case SOCK_SEQPACKET:
162 ops = &packet_ops;
163 state = SS_UNCONNECTED;
164 break;
165 case SOCK_DGRAM:
166 case SOCK_RDM:
167 ops = &msg_ops;
168 state = SS_READY;
169 break;
170 default:
171 return -EPROTOTYPE;
172 }
173
174 /* Allocate socket's protocol area */
175 if (!kern)
176 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
177 else
178 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
179
180 if (sk == NULL)
181 return -ENOMEM;
182
183 tsk = tipc_sk(sk);
184 port = &tsk->port;
185
186 ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE);
187 if (!ref) {
188 pr_warn("Socket registration failed, ref. table exhausted\n");
189 sk_free(sk);
190 return -ENOMEM;
191 }
192
193 /* Finish initializing socket data structures */
194 sock->ops = ops;
195 sock->state = state;
196
197 sock_init_data(sock, sk);
198 sk->sk_backlog_rcv = backlog_rcv;
199 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
200 sk->sk_data_ready = tipc_data_ready;
201 sk->sk_write_space = tipc_write_space;
202 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
203 tipc_port_unlock(port);
204
205 if (sock->state == SS_READY) {
206 tipc_port_set_unreturnable(port, true);
207 if (sock->type == SOCK_DGRAM)
208 tipc_port_set_unreliable(port, true);
209 }
210 return 0;
211 }
212
213 /**
214 * tipc_sock_create_local - create TIPC socket from inside TIPC module
215 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
216 *
217 * We cannot use sock_create_kern here because it bumps the module user count.
218 * Since the socket owner and creator is the same module, we must make sure
219 * that the module count remains zero for module-local sockets; otherwise
220 * we cannot do rmmod.
221 *
222 * Returns 0 on success, errno otherwise
223 */
224 int tipc_sock_create_local(int type, struct socket **res)
225 {
226 int rc;
227
228 rc = sock_create_lite(AF_TIPC, type, 0, res);
229 if (rc < 0) {
230 pr_err("Failed to create kernel socket\n");
231 return rc;
232 }
233 tipc_sk_create(&init_net, *res, 0, 1);
234
235 return 0;
236 }
237
238 /**
239 * tipc_sock_release_local - release socket created by tipc_sock_create_local
240 * @sock: the socket to be released.
241 *
242 * Module reference count is not incremented when such sockets are created,
243 * so we must keep it from being decremented when they are released.
244 */
245 void tipc_sock_release_local(struct socket *sock)
246 {
247 tipc_release(sock);
248 sock->ops = NULL;
249 sock_release(sock);
250 }
251
252 /**
253 * tipc_sock_accept_local - accept a connection on a socket created
254 * with tipc_sock_create_local. Use this function to ensure that the
255 * module reference count is not inadvertently incremented.
256 *
257 * @sock: the accepting socket
258 * @newsock: reference to the new socket to be created
259 * @flags: socket flags
260 */
261
262 int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
263 int flags)
264 {
265 struct sock *sk = sock->sk;
266 int ret;
267
268 ret = sock_create_lite(sk->sk_family, sk->sk_type,
269 sk->sk_protocol, newsock);
270 if (ret < 0)
271 return ret;
272
273 ret = tipc_accept(sock, *newsock, flags);
274 if (ret < 0) {
275 sock_release(*newsock);
276 return ret;
277 }
278 (*newsock)->ops = sock->ops;
279 return ret;
280 }
281
282 /**
283 * tipc_release - destroy a TIPC socket
284 * @sock: socket to destroy
285 *
286 * This routine cleans up any messages that are still queued on the socket.
287 * For DGRAM and RDM socket types, all queued messages are rejected.
288 * For SEQPACKET and STREAM socket types, the first message is rejected
289 * and any others are discarded. (If the first message on a STREAM socket
290 * is partially-read, it is discarded and the next one is rejected instead.)
291 *
292 * NOTE: Rejected messages are not necessarily returned to the sender! They
293 * are returned or discarded according to the "destination droppable" setting
294 * specified for the message by the sender.
295 *
296 * Returns 0 on success, errno otherwise
297 */
298 static int tipc_release(struct socket *sock)
299 {
300 struct sock *sk = sock->sk;
301 struct tipc_sock *tsk;
302 struct tipc_port *port;
303 struct sk_buff *buf;
305
306 /*
307 * Exit if socket isn't fully initialized (occurs when a failed accept()
308 * releases a pre-allocated child socket that was never used)
309 */
310 if (sk == NULL)
311 return 0;
312
313 tsk = tipc_sk(sk);
314 port = &tsk->port;
315 lock_sock(sk);
316
317 /*
318 * Reject all unreceived messages, except on an active connection
319 * (which disconnects locally & sends a 'FIN+' to peer)
320 */
321 while (sock->state != SS_DISCONNECTING) {
322 buf = __skb_dequeue(&sk->sk_receive_queue);
323 if (buf == NULL)
324 break;
325 if (TIPC_SKB_CB(buf)->handle != NULL)
326 kfree_skb(buf);
327 else {
328 if ((sock->state == SS_CONNECTING) ||
329 (sock->state == SS_CONNECTED)) {
330 sock->state = SS_DISCONNECTING;
331 tipc_port_disconnect(port->ref);
332 }
333 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
334 }
335 }
336
337 /* Destroy TIPC port; also disconnects an active connection and
338 * sends a 'FIN-' to peer.
339 */
340 tipc_port_destroy(port);
341
342 /* Discard any remaining (connection-based) messages in receive queue */
343 __skb_queue_purge(&sk->sk_receive_queue);
344
345 /* Reject any messages that accumulated in backlog queue */
346 sock->state = SS_DISCONNECTING;
347 release_sock(sk);
348
349 sock_put(sk);
350 sock->sk = NULL;
351
352 return 0;
353 }
354
355 /**
356 * tipc_bind - associate or disassociate TIPC name(s) with a socket
357 * @sock: socket structure
358 * @uaddr: socket address describing name(s) and desired operation
359 * @uaddr_len: size of socket address data structure
360 *
361 * Name and name sequence binding is indicated using a positive scope value;
362 * a negative scope value unbinds the specified name. Specifying no name
363 * (i.e. a socket address length of 0) unbinds all names from the socket.
364 *
365 * Returns 0 on success, errno otherwise
366 *
367 * NOTE: This routine doesn't need to take the socket lock since it doesn't
368 * access any non-constant socket information.
369 */
370 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
371 int uaddr_len)
372 {
373 struct sock *sk = sock->sk;
374 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
375 struct tipc_sock *tsk = tipc_sk(sk);
376 int res = -EINVAL;
377
378 lock_sock(sk);
379 if (unlikely(!uaddr_len)) {
380 res = tipc_withdraw(&tsk->port, 0, NULL);
381 goto exit;
382 }
383
384 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
385 res = -EINVAL;
386 goto exit;
387 }
388 if (addr->family != AF_TIPC) {
389 res = -EAFNOSUPPORT;
390 goto exit;
391 }
392
393 if (addr->addrtype == TIPC_ADDR_NAME)
394 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
395 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
396 res = -EAFNOSUPPORT;
397 goto exit;
398 }
399
400 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
401 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
402 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
403 res = -EACCES;
404 goto exit;
405 }
406
407 res = (addr->scope > 0) ?
408 tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) :
409 tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq);
410 exit:
411 release_sock(sk);
412 return res;
413 }
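/*
 * Minimal user-space sketch of the bind semantics described above. It
 * assumes <sys/socket.h>, <string.h> and <linux/tipc.h> are included and
 * that sd is a socket(AF_TIPC, SOCK_RDM, 0) descriptor; the service type
 * and instance values (1000, 1) are arbitrary examples.
 *
 *	struct sockaddr_tipc a;
 *
 *	memset(&a, 0, sizeof(a));
 *	a.family = AF_TIPC;
 *	a.addrtype = TIPC_ADDR_NAME;
 *	a.addr.name.name.type = 1000;
 *	a.addr.name.name.instance = 1;
 *	a.scope = TIPC_CLUSTER_SCOPE;                    // positive scope: publish name
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 *	a.scope = -TIPC_CLUSTER_SCOPE;                   // negative scope: withdraw name
 *	bind(sd, (struct sockaddr *)&a, sizeof(a));
 *
 *	bind(sd, (struct sockaddr *)&a, 0);              // zero length: withdraw all names
 */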
414
415 /**
416 * tipc_getname - get port ID of socket or peer socket
417 * @sock: socket structure
418 * @uaddr: area for returned socket address
419 * @uaddr_len: area for returned length of socket address
420 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
421 *
422 * Returns 0 on success, errno otherwise
423 *
424 * NOTE: This routine doesn't need to take the socket lock since it only
425 * accesses socket information that is unchanging (or which changes in
426 * a completely predictable manner).
427 */
428 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
429 int *uaddr_len, int peer)
430 {
431 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
432 struct tipc_sock *tsk = tipc_sk(sock->sk);
433
434 memset(addr, 0, sizeof(*addr));
435 if (peer) {
436 if ((sock->state != SS_CONNECTED) &&
437 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
438 return -ENOTCONN;
439 addr->addr.id.ref = tipc_port_peerport(&tsk->port);
440 addr->addr.id.node = tipc_port_peernode(&tsk->port);
441 } else {
442 addr->addr.id.ref = tsk->port.ref;
443 addr->addr.id.node = tipc_own_addr;
444 }
445
446 *uaddr_len = sizeof(*addr);
447 addr->addrtype = TIPC_ADDR_ID;
448 addr->family = AF_TIPC;
449 addr->scope = 0;
450 addr->addr.name.domain = 0;
451
452 return 0;
453 }
454
455 /**
456 * tipc_poll - read and possibly block on pollmask
457 * @file: file structure associated with the socket
458 * @sock: socket for which to calculate the poll bits
459 * @wait: poll table supplied by the caller
460 *
461 * Returns pollmask value
462 *
463 * COMMENTARY:
464 * It appears that the usual socket locking mechanisms are not useful here
465 * since the pollmask info is potentially out-of-date the moment this routine
466 * exits. TCP and other protocols seem to rely on higher level poll routines
467 * to handle any preventable race conditions, so TIPC will do the same ...
468 *
469 * TIPC sets the returned events as follows:
470 *
471 * socket state flags set
472 * ------------ ---------
473 * unconnected no read flags
474 * POLLOUT if port is not congested
475 *
476 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
477 * no write flags
478 *
479 * connected POLLIN/POLLRDNORM if data in rx queue
480 * POLLOUT if port is not congested
481 *
482 * disconnecting POLLIN/POLLRDNORM/POLLHUP
483 * no write flags
484 *
485 * listening POLLIN if SYN in rx queue
486 * no write flags
487 *
488 * ready POLLIN/POLLRDNORM if data in rx queue
489 * [connectionless] POLLOUT (since port cannot be congested)
490 *
491 * IMPORTANT: The fact that a read or write operation is indicated does NOT
492 * imply that the operation will succeed, merely that it should be performed
493 * and will not block.
494 */
495 static unsigned int tipc_poll(struct file *file, struct socket *sock,
496 poll_table *wait)
497 {
498 struct sock *sk = sock->sk;
499 struct tipc_sock *tsk = tipc_sk(sk);
500 u32 mask = 0;
501
502 sock_poll_wait(file, sk_sleep(sk), wait);
503
504 switch ((int)sock->state) {
505 case SS_UNCONNECTED:
506 if (!tsk->port.congested)
507 mask |= POLLOUT;
508 break;
509 case SS_READY:
510 case SS_CONNECTED:
511 if (!tsk->port.congested)
512 mask |= POLLOUT;
513 /* fall thru' */
514 case SS_CONNECTING:
515 case SS_LISTENING:
516 if (!skb_queue_empty(&sk->sk_receive_queue))
517 mask |= (POLLIN | POLLRDNORM);
518 break;
519 case SS_DISCONNECTING:
520 mask = (POLLIN | POLLRDNORM | POLLHUP);
521 break;
522 }
523
524 return mask;
525 }
526
527 /**
528 * dest_name_check - verify user is permitted to send to specified port name
529 * @dest: destination address
530 * @m: descriptor for message to be sent
531 *
532 * Prevents restricted configuration commands from being issued by
533 * unauthorized users.
534 *
535 * Returns 0 if permission is granted, otherwise errno
536 */
537 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
538 {
539 struct tipc_cfg_msg_hdr hdr;
540
541 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
542 return 0;
543 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
544 return 0;
545 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
546 return -EACCES;
547
548 if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
549 return -EMSGSIZE;
550 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
551 return -EFAULT;
552 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
553 return -EACCES;
554
555 return 0;
556 }
557
558 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
559 {
560 struct sock *sk = sock->sk;
561 struct tipc_sock *tsk = tipc_sk(sk);
562 DEFINE_WAIT(wait);
563 int done;
564
565 do {
566 int err = sock_error(sk);
567 if (err)
568 return err;
569 if (sock->state == SS_DISCONNECTING)
570 return -EPIPE;
571 if (!*timeo_p)
572 return -EAGAIN;
573 if (signal_pending(current))
574 return sock_intr_errno(*timeo_p);
575
576 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
577 done = sk_wait_event(sk, timeo_p, !tsk->port.congested);
578 finish_wait(sk_sleep(sk), &wait);
579 } while (!done);
580 return 0;
581 }
582
583
584 /**
585 * tipc_sendmsg - send message in connectionless manner
586 * @iocb: if NULL, indicates that socket lock is already held
587 * @sock: socket structure
588 * @m: message to send
589 * @total_len: length of message
590 *
591 * Message must have a destination specified explicitly.
592 * Used for SOCK_RDM and SOCK_DGRAM messages,
593 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
594 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
595 *
596 * Returns the number of bytes sent on success, or errno otherwise
597 */
598 static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
599 struct msghdr *m, size_t total_len)
600 {
601 struct sock *sk = sock->sk;
602 struct tipc_sock *tsk = tipc_sk(sk);
603 struct tipc_port *port = &tsk->port;
604 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
605 int needs_conn;
606 long timeo;
607 int res = -EINVAL;
608
609 if (unlikely(!dest))
610 return -EDESTADDRREQ;
611 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
612 (dest->family != AF_TIPC)))
613 return -EINVAL;
614 if (total_len > TIPC_MAX_USER_MSG_SIZE)
615 return -EMSGSIZE;
616
617 if (iocb)
618 lock_sock(sk);
619
620 needs_conn = (sock->state != SS_READY);
621 if (unlikely(needs_conn)) {
622 if (sock->state == SS_LISTENING) {
623 res = -EPIPE;
624 goto exit;
625 }
626 if (sock->state != SS_UNCONNECTED) {
627 res = -EISCONN;
628 goto exit;
629 }
630 if (tsk->port.published) {
631 res = -EOPNOTSUPP;
632 goto exit;
633 }
634 if (dest->addrtype == TIPC_ADDR_NAME) {
635 tsk->port.conn_type = dest->addr.name.name.type;
636 tsk->port.conn_instance = dest->addr.name.name.instance;
637 }
638
639 /* Abort any pending connection attempts (very unlikely) */
640 reject_rx_queue(sk);
641 }
642
643 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
644 do {
645 if (dest->addrtype == TIPC_ADDR_NAME) {
646 res = dest_name_check(dest, m);
647 if (res)
648 break;
649 res = tipc_send2name(port,
650 &dest->addr.name.name,
651 dest->addr.name.domain,
652 m->msg_iov,
653 total_len);
654 } else if (dest->addrtype == TIPC_ADDR_ID) {
655 res = tipc_send2port(port,
656 &dest->addr.id,
657 m->msg_iov,
658 total_len);
659 } else if (dest->addrtype == TIPC_ADDR_MCAST) {
660 if (needs_conn) {
661 res = -EOPNOTSUPP;
662 break;
663 }
664 res = dest_name_check(dest, m);
665 if (res)
666 break;
667 res = tipc_port_mcast_xmit(port,
668 &dest->addr.nameseq,
669 m->msg_iov,
670 total_len);
671 }
672 if (likely(res != -ELINKCONG)) {
673 if (needs_conn && (res >= 0))
674 sock->state = SS_CONNECTING;
675 break;
676 }
677 res = tipc_wait_for_sndmsg(sock, &timeo);
678 if (res)
679 break;
680 } while (1);
681
682 exit:
683 if (iocb)
684 release_sock(sk);
685 return res;
686 }
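/*
 * User-space sketch of a connectionless send to a port name, corresponding
 * to the TIPC_ADDR_NAME branch above. It assumes <sys/socket.h>, <string.h>
 * and <linux/tipc.h> are included and that sd is a socket(AF_TIPC, SOCK_RDM, 0)
 * descriptor; the service type and instance are arbitrary examples.
 *
 *	struct sockaddr_tipc dst;
 *	const char payload[] = "hello";
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.family = AF_TIPC;
 *	dst.addrtype = TIPC_ADDR_NAME;
 *	dst.addr.name.name.type = 1000;
 *	dst.addr.name.name.instance = 1;
 *	dst.addr.name.domain = 0;               // 0 = default (unrestricted) lookup domain
 *	sendto(sd, payload, sizeof(payload), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 */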
687
688 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
689 {
690 struct sock *sk = sock->sk;
691 struct tipc_sock *tsk = tipc_sk(sk);
692 struct tipc_port *port = &tsk->port;
693 DEFINE_WAIT(wait);
694 int done;
695
696 do {
697 int err = sock_error(sk);
698 if (err)
699 return err;
700 if (sock->state == SS_DISCONNECTING)
701 return -EPIPE;
702 else if (sock->state != SS_CONNECTED)
703 return -ENOTCONN;
704 if (!*timeo_p)
705 return -EAGAIN;
706 if (signal_pending(current))
707 return sock_intr_errno(*timeo_p);
708
709 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
710 done = sk_wait_event(sk, timeo_p,
711 (!port->congested || !port->connected));
712 finish_wait(sk_sleep(sk), &wait);
713 } while (!done);
714 return 0;
715 }
716
717 /**
718 * tipc_send_packet - send a connection-oriented message
719 * @iocb: if NULL, indicates that socket lock is already held
720 * @sock: socket structure
721 * @m: message to send
722 * @total_len: length of message
723 *
724 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
725 *
726 * Returns the number of bytes sent on success, or errno otherwise
727 */
728 static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
729 struct msghdr *m, size_t total_len)
730 {
731 struct sock *sk = sock->sk;
732 struct tipc_sock *tsk = tipc_sk(sk);
733 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
734 int res = -EINVAL;
735 long timeo;
736
737 /* Handle implied connection establishment */
738 if (unlikely(dest))
739 return tipc_sendmsg(iocb, sock, m, total_len);
740
741 if (total_len > TIPC_MAX_USER_MSG_SIZE)
742 return -EMSGSIZE;
743
744 if (iocb)
745 lock_sock(sk);
746
747 if (unlikely(sock->state != SS_CONNECTED)) {
748 if (sock->state == SS_DISCONNECTING)
749 res = -EPIPE;
750 else
751 res = -ENOTCONN;
752 goto exit;
753 }
754
755 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
756 do {
757 res = tipc_send(&tsk->port, m->msg_iov, total_len);
758 if (likely(res != -ELINKCONG))
759 break;
760 res = tipc_wait_for_sndpkt(sock, &timeo);
761 if (res)
762 break;
763 } while (1);
764 exit:
765 if (iocb)
766 release_sock(sk);
767 return res;
768 }
769
770 /**
771 * tipc_send_stream - send stream-oriented data
772 * @iocb: (unused)
773 * @sock: socket structure
774 * @m: data to send
775 * @total_len: total length of data to be sent
776 *
777 * Used for SOCK_STREAM data.
778 *
779 * Returns the number of bytes sent on success (or partial success),
780 * or errno if no data sent
781 */
782 static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
783 struct msghdr *m, size_t total_len)
784 {
785 struct sock *sk = sock->sk;
786 struct tipc_sock *tsk = tipc_sk(sk);
787 struct msghdr my_msg;
788 struct iovec my_iov;
789 struct iovec *curr_iov;
790 int curr_iovlen;
791 char __user *curr_start;
792 u32 hdr_size;
793 int curr_left;
794 int bytes_to_send;
795 int bytes_sent;
796 int res;
797
798 lock_sock(sk);
799
800 /* Handle special cases where there is no connection */
801 if (unlikely(sock->state != SS_CONNECTED)) {
802 if (sock->state == SS_UNCONNECTED)
803 res = tipc_send_packet(NULL, sock, m, total_len);
804 else
805 res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
806 goto exit;
807 }
808
809 if (unlikely(m->msg_name)) {
810 res = -EISCONN;
811 goto exit;
812 }
813
814 if (total_len > (unsigned int)INT_MAX) {
815 res = -EMSGSIZE;
816 goto exit;
817 }
818
819 /*
820 * Send each iovec entry using one or more messages
821 *
822 * Note: This algorithm is good for the most likely case
823 * (i.e. one large iovec entry), but could be improved to pass sets
824 * of small iovec entries into tipc_send_packet().
825 */
826 curr_iov = m->msg_iov;
827 curr_iovlen = m->msg_iovlen;
828 my_msg.msg_iov = &my_iov;
829 my_msg.msg_iovlen = 1;
830 my_msg.msg_flags = m->msg_flags;
831 my_msg.msg_name = NULL;
832 bytes_sent = 0;
833
834 hdr_size = msg_hdr_sz(&tsk->port.phdr);
835
836 while (curr_iovlen--) {
837 curr_start = curr_iov->iov_base;
838 curr_left = curr_iov->iov_len;
839
840 while (curr_left) {
841 bytes_to_send = tsk->port.max_pkt - hdr_size;
842 if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
843 bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
844 if (curr_left < bytes_to_send)
845 bytes_to_send = curr_left;
846 my_iov.iov_base = curr_start;
847 my_iov.iov_len = bytes_to_send;
848 res = tipc_send_packet(NULL, sock, &my_msg,
849 bytes_to_send);
850 if (res < 0) {
851 if (bytes_sent)
852 res = bytes_sent;
853 goto exit;
854 }
855 curr_left -= bytes_to_send;
856 curr_start += bytes_to_send;
857 bytes_sent += bytes_to_send;
858 }
859
860 curr_iov++;
861 }
862 res = bytes_sent;
863 exit:
864 release_sock(sk);
865 return res;
866 }
867
868 /**
869 * auto_connect - complete connection setup to a remote port
870 * @tsk: tipc socket structure
871 * @msg: peer's response message
872 *
873 * Returns 0 on success, errno otherwise
874 */
875 static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg)
876 {
877 struct tipc_port *port = &tsk->port;
878 struct socket *sock = tsk->sk.sk_socket;
879 struct tipc_portid peer;
880
881 peer.ref = msg_origport(msg);
882 peer.node = msg_orignode(msg);
883
884 __tipc_port_connect(port->ref, port, &peer);
885
886 if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
887 return -EINVAL;
888 msg_set_importance(&port->phdr, (u32)msg_importance(msg));
889 sock->state = SS_CONNECTED;
890 return 0;
891 }
892
893 /**
894 * set_orig_addr - capture sender's address for received message
895 * @m: descriptor for message info
896 * @msg: received message header
897 *
898 * Note: Address is not captured if not requested by receiver.
899 */
900 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
901 {
902 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
903
904 if (addr) {
905 addr->family = AF_TIPC;
906 addr->addrtype = TIPC_ADDR_ID;
907 memset(&addr->addr, 0, sizeof(addr->addr));
908 addr->addr.id.ref = msg_origport(msg);
909 addr->addr.id.node = msg_orignode(msg);
910 addr->addr.name.domain = 0; /* could leave uninitialized */
911 addr->scope = 0; /* could leave uninitialized */
912 m->msg_namelen = sizeof(struct sockaddr_tipc);
913 }
914 }
915
916 /**
917 * anc_data_recv - optionally capture ancillary data for received message
918 * @m: descriptor for message info
919 * @msg: received message header
920 * @tport: TIPC port associated with message
921 *
922 * Note: Ancillary data is not captured if not requested by receiver.
923 *
924 * Returns 0 if successful, otherwise errno
925 */
926 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
927 struct tipc_port *tport)
928 {
929 u32 anc_data[3];
930 u32 err;
931 u32 dest_type;
932 int has_name;
933 int res;
934
935 if (likely(m->msg_controllen == 0))
936 return 0;
937
938 /* Optionally capture errored message object(s) */
939 err = msg ? msg_errcode(msg) : 0;
940 if (unlikely(err)) {
941 anc_data[0] = err;
942 anc_data[1] = msg_data_sz(msg);
943 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
944 if (res)
945 return res;
946 if (anc_data[1]) {
947 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
948 msg_data(msg));
949 if (res)
950 return res;
951 }
952 }
953
954 /* Optionally capture message destination object */
955 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
956 switch (dest_type) {
957 case TIPC_NAMED_MSG:
958 has_name = 1;
959 anc_data[0] = msg_nametype(msg);
960 anc_data[1] = msg_namelower(msg);
961 anc_data[2] = msg_namelower(msg);
962 break;
963 case TIPC_MCAST_MSG:
964 has_name = 1;
965 anc_data[0] = msg_nametype(msg);
966 anc_data[1] = msg_namelower(msg);
967 anc_data[2] = msg_nameupper(msg);
968 break;
969 case TIPC_CONN_MSG:
970 has_name = (tport->conn_type != 0);
971 anc_data[0] = tport->conn_type;
972 anc_data[1] = tport->conn_instance;
973 anc_data[2] = tport->conn_instance;
974 break;
975 default:
976 has_name = 0;
977 }
978 if (has_name) {
979 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
980 if (res)
981 return res;
982 }
983
984 return 0;
985 }
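/*
 * User-space sketch of reading the ancillary data produced above. It assumes
 * <sys/socket.h>, <string.h> and <linux/tipc.h> are included and that sd is
 * a TIPC socket descriptor; the buffer sizes are arbitrary examples.
 *
 *	char data[1024], ctrl[256];
 *	struct iovec iov = { data, sizeof(data) };
 *	struct msghdr m = { 0 };
 *	struct cmsghdr *cm;
 *	__u32 dest[3];                          // {type, lower, upper} from TIPC_DESTNAME
 *	__u32 errinfo[2];                       // {error code, returned data length}
 *
 *	m.msg_iov = &iov;
 *	m.msg_iovlen = 1;
 *	m.msg_control = ctrl;
 *	m.msg_controllen = sizeof(ctrl);
 *	recvmsg(sd, &m, 0);
 *	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_DESTNAME)
 *			memcpy(dest, CMSG_DATA(cm), sizeof(dest));
 *		else if (cm->cmsg_type == TIPC_ERRINFO)
 *			memcpy(errinfo, CMSG_DATA(cm), sizeof(errinfo));
 *	}
 */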
986
987 static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
988 {
989 struct sock *sk = sock->sk;
990 DEFINE_WAIT(wait);
991 int err;
992
993 for (;;) {
994 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
995 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
996 if (sock->state == SS_DISCONNECTING) {
997 err = -ENOTCONN;
998 break;
999 }
1000 release_sock(sk);
1001 timeo = schedule_timeout(timeo);
1002 lock_sock(sk);
1003 }
1004 err = 0;
1005 if (!skb_queue_empty(&sk->sk_receive_queue))
1006 break;
1007 err = sock_intr_errno(timeo);
1008 if (signal_pending(current))
1009 break;
1010 err = -EAGAIN;
1011 if (!timeo)
1012 break;
1013 }
1014 finish_wait(sk_sleep(sk), &wait);
1015 return err;
1016 }
1017
1018 /**
1019 * tipc_recvmsg - receive packet-oriented message
1020 * @iocb: (unused)
1021 * @m: descriptor for message info
1022 * @buf_len: total size of user buffer area
1023 * @flags: receive flags
1024 *
1025 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1026 * If the complete message doesn't fit in user area, truncate it.
1027 *
1028 * Returns size of returned message data, errno otherwise
1029 */
1030 static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
1031 struct msghdr *m, size_t buf_len, int flags)
1032 {
1033 struct sock *sk = sock->sk;
1034 struct tipc_sock *tsk = tipc_sk(sk);
1035 struct tipc_port *port = &tsk->port;
1036 struct sk_buff *buf;
1037 struct tipc_msg *msg;
1038 long timeo;
1039 unsigned int sz;
1040 u32 err;
1041 int res;
1042
1043 /* Catch invalid receive requests */
1044 if (unlikely(!buf_len))
1045 return -EINVAL;
1046
1047 lock_sock(sk);
1048
1049 if (unlikely(sock->state == SS_UNCONNECTED)) {
1050 res = -ENOTCONN;
1051 goto exit;
1052 }
1053
1054 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1055 restart:
1056
1057 /* Look for a message in receive queue; wait if necessary */
1058 res = tipc_wait_for_rcvmsg(sock, timeo);
1059 if (res)
1060 goto exit;
1061
1062 /* Look at first message in receive queue */
1063 buf = skb_peek(&sk->sk_receive_queue);
1064 msg = buf_msg(buf);
1065 sz = msg_data_sz(msg);
1066 err = msg_errcode(msg);
1067
1068 /* Discard an empty non-errored message & try again */
1069 if ((!sz) && (!err)) {
1070 advance_rx_queue(sk);
1071 goto restart;
1072 }
1073
1074 /* Capture sender's address (optional) */
1075 set_orig_addr(m, msg);
1076
1077 /* Capture ancillary data (optional) */
1078 res = anc_data_recv(m, msg, port);
1079 if (res)
1080 goto exit;
1081
1082 /* Capture message data (if valid) & compute return value (always) */
1083 if (!err) {
1084 if (unlikely(buf_len < sz)) {
1085 sz = buf_len;
1086 m->msg_flags |= MSG_TRUNC;
1087 }
1088 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
1089 m->msg_iov, sz);
1090 if (res)
1091 goto exit;
1092 res = sz;
1093 } else {
1094 if ((sock->state == SS_READY) ||
1095 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
1096 res = 0;
1097 else
1098 res = -ECONNRESET;
1099 }
1100
1101 /* Consume received message (optional) */
1102 if (likely(!(flags & MSG_PEEK))) {
1103 if ((sock->state != SS_READY) &&
1104 (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1105 tipc_acknowledge(port->ref, port->conn_unacked);
1106 advance_rx_queue(sk);
1107 }
1108 exit:
1109 release_sock(sk);
1110 return res;
1111 }
1112
1113 /**
1114 * tipc_recv_stream - receive stream-oriented data
1115 * @iocb: (unused)
1116 * @m: descriptor for message info
1117 * @buf_len: total size of user buffer area
1118 * @flags: receive flags
1119 *
1120 * Used for SOCK_STREAM messages only. If not enough data is available,
1121 * this routine will optionally wait for more; it never truncates data.
1122 *
1123 * Returns size of returned message data, errno otherwise
1124 */
1125 static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
1126 struct msghdr *m, size_t buf_len, int flags)
1127 {
1128 struct sock *sk = sock->sk;
1129 struct tipc_sock *tsk = tipc_sk(sk);
1130 struct tipc_port *port = &tsk->port;
1131 struct sk_buff *buf;
1132 struct tipc_msg *msg;
1133 long timeo;
1134 unsigned int sz;
1135 int sz_to_copy, target, needed;
1136 int sz_copied = 0;
1137 u32 err;
1138 int res = 0;
1139
1140 /* Catch invalid receive attempts */
1141 if (unlikely(!buf_len))
1142 return -EINVAL;
1143
1144 lock_sock(sk);
1145
1146 if (unlikely(sock->state == SS_UNCONNECTED)) {
1147 res = -ENOTCONN;
1148 goto exit;
1149 }
1150
1151 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1152 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1153
1154 restart:
1155 /* Look for a message in receive queue; wait if necessary */
1156 res = tipc_wait_for_rcvmsg(sock, timeo);
1157 if (res)
1158 goto exit;
1159
1160 /* Look at first message in receive queue */
1161 buf = skb_peek(&sk->sk_receive_queue);
1162 msg = buf_msg(buf);
1163 sz = msg_data_sz(msg);
1164 err = msg_errcode(msg);
1165
1166 /* Discard an empty non-errored message & try again */
1167 if ((!sz) && (!err)) {
1168 advance_rx_queue(sk);
1169 goto restart;
1170 }
1171
1172 /* Optionally capture sender's address & ancillary data of first msg */
1173 if (sz_copied == 0) {
1174 set_orig_addr(m, msg);
1175 res = anc_data_recv(m, msg, port);
1176 if (res)
1177 goto exit;
1178 }
1179
1180 /* Capture message data (if valid) & compute return value (always) */
1181 if (!err) {
1182 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1183
1184 sz -= offset;
1185 needed = (buf_len - sz_copied);
1186 sz_to_copy = (sz <= needed) ? sz : needed;
1187
1188 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
1189 m->msg_iov, sz_to_copy);
1190 if (res)
1191 goto exit;
1192
1193 sz_copied += sz_to_copy;
1194
1195 if (sz_to_copy < sz) {
1196 if (!(flags & MSG_PEEK))
1197 TIPC_SKB_CB(buf)->handle =
1198 (void *)(unsigned long)(offset + sz_to_copy);
1199 goto exit;
1200 }
1201 } else {
1202 if (sz_copied != 0)
1203 goto exit; /* can't add error msg to valid data */
1204
1205 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1206 res = 0;
1207 else
1208 res = -ECONNRESET;
1209 }
1210
1211 /* Consume received message (optional) */
1212 if (likely(!(flags & MSG_PEEK))) {
1213 if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1214 tipc_acknowledge(port->ref, port->conn_unacked);
1215 advance_rx_queue(sk);
1216 }
1217
1218 /* Loop around if more data is required */
1219 if ((sz_copied < buf_len) && /* didn't get all requested data */
1220 (!skb_queue_empty(&sk->sk_receive_queue) ||
1221 (sz_copied < target)) && /* and more is ready or required */
1222 (!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
1223 (!err)) /* and haven't reached a FIN */
1224 goto restart;
1225
1226 exit:
1227 release_sock(sk);
1228 return sz_copied ? sz_copied : res;
1229 }
1230
1231 /**
1232 * tipc_write_space - wake up thread if port congestion is released
1233 * @sk: socket
1234 */
1235 static void tipc_write_space(struct sock *sk)
1236 {
1237 struct socket_wq *wq;
1238
1239 rcu_read_lock();
1240 wq = rcu_dereference(sk->sk_wq);
1241 if (wq_has_sleeper(wq))
1242 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1243 POLLWRNORM | POLLWRBAND);
1244 rcu_read_unlock();
1245 }
1246
1247 /**
1248 * tipc_data_ready - wake up threads to indicate messages have been received
1249 * @sk: socket
1250 * @len: the length of messages
1251 */
1252 static void tipc_data_ready(struct sock *sk, int len)
1253 {
1254 struct socket_wq *wq;
1255
1256 rcu_read_lock();
1257 wq = rcu_dereference(sk->sk_wq);
1258 if (wq_has_sleeper(wq))
1259 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1260 POLLRDNORM | POLLRDBAND);
1261 rcu_read_unlock();
1262 }
1263
1264 /**
1265 * filter_connect - Handle all incoming messages for a connection-based socket
1266 * @tsk: TIPC socket
1267 * @buf: pointer to the received message buffer
1268 *
1269 * Returns a TIPC error status code (TIPC_OK if the message is accepted);
1270 * may also set the socket's error status (sk_err) when an error is detected
1271 */
1272 static u32 filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1273 {
1274 struct sock *sk = &tsk->sk;
1275 struct tipc_port *port = &tsk->port;
1276 struct socket *sock = sk->sk_socket;
1277 struct tipc_msg *msg = buf_msg(*buf);
1278
1279 u32 retval = TIPC_ERR_NO_PORT;
1280 int res;
1281
1282 if (msg_mcast(msg))
1283 return retval;
1284
1285 switch ((int)sock->state) {
1286 case SS_CONNECTED:
1287 /* Accept only connection-based messages sent by peer */
1288 if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) {
1289 if (unlikely(msg_errcode(msg))) {
1290 sock->state = SS_DISCONNECTING;
1291 __tipc_port_disconnect(port);
1292 }
1293 retval = TIPC_OK;
1294 }
1295 break;
1296 case SS_CONNECTING:
1297 /* Accept only ACK or NACK message */
1298 if (unlikely(msg_errcode(msg))) {
1299 sock->state = SS_DISCONNECTING;
1300 sk->sk_err = ECONNREFUSED;
1301 retval = TIPC_OK;
1302 break;
1303 }
1304
1305 if (unlikely(!msg_connected(msg)))
1306 break;
1307
1308 res = auto_connect(tsk, msg);
1309 if (res) {
1310 sock->state = SS_DISCONNECTING;
1311 sk->sk_err = -res;
1312 retval = TIPC_OK;
1313 break;
1314 }
1315
1316 /* If an incoming message is an 'ACK-', it should be
1317 * discarded here because it doesn't contain useful
1318 * data. In addition, we should try to wake up
1319 * connect() routine if sleeping.
1320 */
1321 if (msg_data_sz(msg) == 0) {
1322 kfree_skb(*buf);
1323 *buf = NULL;
1324 if (waitqueue_active(sk_sleep(sk)))
1325 wake_up_interruptible(sk_sleep(sk));
1326 }
1327 retval = TIPC_OK;
1328 break;
1329 case SS_LISTENING:
1330 case SS_UNCONNECTED:
1331 /* Accept only SYN message */
1332 if (!msg_connected(msg) && !(msg_errcode(msg)))
1333 retval = TIPC_OK;
1334 break;
1335 case SS_DISCONNECTING:
1336 break;
1337 default:
1338 pr_err("Unknown socket state %u\n", sock->state);
1339 }
1340 return retval;
1341 }
1342
1343 /**
1344 * rcvbuf_limit - get proper overload limit of socket receive queue
1345 * @sk: socket
1346 * @buf: message
1347 *
1348 * For all connection oriented messages, irrespective of importance,
1349 * the default overload value (i.e. 67MB) is set as limit.
1350 *
1351 * For all connectionless messages, by default new queue limits are
1352 * as follows:
1353 *
1354 * TIPC_LOW_IMPORTANCE (4 MB)
1355 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1356 * TIPC_HIGH_IMPORTANCE (16 MB)
1357 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1358 *
1359 * Returns overload limit according to corresponding message importance
1360 */
1361 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1362 {
1363 struct tipc_msg *msg = buf_msg(buf);
1364
1365 if (msg_connected(msg))
1366 return sysctl_tipc_rmem[2];
1367
1368 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1369 msg_importance(msg);
1370 }
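/*
 * Worked example of the shift arithmetic above: with TIPC_CRITICAL_IMPORTANCE
 * equal to 3, the connectionless limit is (sk_rcvbuf >> 3) << importance,
 * i.e. sk_rcvbuf / 8 doubled once per importance level. The 4/8/16/32 MB
 * figures quoted above therefore correspond to a default sk_rcvbuf
 * (sysctl_tipc_rmem[1]) of roughly 32 MB.
 */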
1371
1372 /**
1373 * filter_rcv - validate incoming message
1374 * @sk: socket
1375 * @buf: message
1376 *
1377 * Enqueues message on receive queue if acceptable; optionally handles
1378 * disconnect indication for a connected socket.
1379 *
1380 * Called with socket lock already taken; port lock may also be taken.
1381 *
1382 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1383 */
1384 static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1385 {
1386 struct socket *sock = sk->sk_socket;
1387 struct tipc_sock *tsk = tipc_sk(sk);
1388 struct tipc_msg *msg = buf_msg(buf);
1389 unsigned int limit = rcvbuf_limit(sk, buf);
1390 u32 res = TIPC_OK;
1391
1392 /* Reject message if it is wrong sort of message for socket */
1393 if (msg_type(msg) > TIPC_DIRECT_MSG)
1394 return TIPC_ERR_NO_PORT;
1395
1396 if (sock->state == SS_READY) {
1397 if (msg_connected(msg))
1398 return TIPC_ERR_NO_PORT;
1399 } else {
1400 res = filter_connect(tsk, &buf);
1401 if (res != TIPC_OK || buf == NULL)
1402 return res;
1403 }
1404
1405 /* Reject message if there isn't room to queue it */
1406 if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
1407 return TIPC_ERR_OVERLOAD;
1408
1409 /* Enqueue message */
1410 TIPC_SKB_CB(buf)->handle = NULL;
1411 __skb_queue_tail(&sk->sk_receive_queue, buf);
1412 skb_set_owner_r(buf, sk);
1413
1414 sk->sk_data_ready(sk, 0);
1415 return TIPC_OK;
1416 }
1417
1418 /**
1419 * backlog_rcv - handle incoming message from backlog queue
1420 * @sk: socket
1421 * @buf: message
1422 *
1423 * Caller must hold socket lock, but not port lock.
1424 *
1425 * Returns 0
1426 */
1427 static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1428 {
1429 u32 res;
1430
1431 res = filter_rcv(sk, buf);
1432 if (res)
1433 tipc_reject_msg(buf, res);
1434 return 0;
1435 }
1436
1437 /**
1438 * tipc_sk_rcv - handle incoming message
1439 * @sk: socket receiving message
1440 * @buf: message
1441 *
1442 * Called with port lock already taken.
1443 *
1444 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1445 */
1446 u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf)
1447 {
1448 u32 res;
1449
1450 /*
1451 * Process message if socket is unlocked; otherwise add to backlog queue
1452 *
1453 * This code is based on sk_receive_skb(), but must be distinct from it
1454 * since a TIPC-specific filter/reject mechanism is utilized
1455 */
1456 bh_lock_sock(sk);
1457 if (!sock_owned_by_user(sk)) {
1458 res = filter_rcv(sk, buf);
1459 } else {
1460 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf)))
1461 res = TIPC_ERR_OVERLOAD;
1462 else
1463 res = TIPC_OK;
1464 }
1465 bh_unlock_sock(sk);
1466
1467 return res;
1468 }
1469
1470 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1471 {
1472 struct sock *sk = sock->sk;
1473 DEFINE_WAIT(wait);
1474 int done;
1475
1476 do {
1477 int err = sock_error(sk);
1478 if (err)
1479 return err;
1480 if (!*timeo_p)
1481 return -ETIMEDOUT;
1482 if (signal_pending(current))
1483 return sock_intr_errno(*timeo_p);
1484
1485 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1486 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1487 finish_wait(sk_sleep(sk), &wait);
1488 } while (!done);
1489 return 0;
1490 }
1491
1492 /**
1493 * tipc_connect - establish a connection to another TIPC port
1494 * @sock: socket structure
1495 * @dest: socket address for destination port
1496 * @destlen: size of socket address data structure
1497 * @flags: file-related flags associated with socket
1498 *
1499 * Returns 0 on success, errno otherwise
1500 */
1501 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1502 int destlen, int flags)
1503 {
1504 struct sock *sk = sock->sk;
1505 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1506 struct msghdr m = {NULL,};
1507 long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
1508 socket_state previous;
1509 int res;
1510
1511 lock_sock(sk);
1512
1513 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1514 if (sock->state == SS_READY) {
1515 res = -EOPNOTSUPP;
1516 goto exit;
1517 }
1518
1519 /*
1520 * Reject connection attempt using multicast address
1521 *
1522 * Note: tipc_sendmsg() validates the rest of the address fields,
1523 * so there's no need to do it here
1524 */
1525 if (dst->addrtype == TIPC_ADDR_MCAST) {
1526 res = -EINVAL;
1527 goto exit;
1528 }
1529
1530 previous = sock->state;
1531 switch (sock->state) {
1532 case SS_UNCONNECTED:
1533 /* Send a 'SYN-' to destination */
1534 m.msg_name = dest;
1535 m.msg_namelen = destlen;
1536
1537 /* If connect() is non-blocking, set MSG_DONTWAIT so that
1538 * tipc_sendmsg() will not block.
1539 */
1540 if (!timeout)
1541 m.msg_flags = MSG_DONTWAIT;
1542
1543 res = tipc_sendmsg(NULL, sock, &m, 0);
1544 if ((res < 0) && (res != -EWOULDBLOCK))
1545 goto exit;
1546
1547 /* Just entered SS_CONNECTING state; the only
1548 * difference is that return value in non-blocking
1549 * case is EINPROGRESS, rather than EALREADY.
1550 */
1551 res = -EINPROGRESS;
1552 case SS_CONNECTING:
1553 if (previous == SS_CONNECTING)
1554 res = -EALREADY;
1555 if (!timeout)
1556 goto exit;
1557 timeout = msecs_to_jiffies(timeout);
1558 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1559 res = tipc_wait_for_connect(sock, &timeout);
1560 break;
1561 case SS_CONNECTED:
1562 res = -EISCONN;
1563 break;
1564 default:
1565 res = -EINVAL;
1566 break;
1567 }
1568 exit:
1569 release_sock(sk);
1570 return res;
1571 }
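/*
 * User-space sketch of connecting a SOCK_SEQPACKET socket to a port name,
 * exercising the state machine above. It assumes <sys/socket.h>, <string.h>,
 * <errno.h> and <linux/tipc.h> are included; the service type and instance
 * are arbitrary examples.
 *
 *	struct sockaddr_tipc peer;
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.family = AF_TIPC;
 *	peer.addrtype = TIPC_ADDR_NAME;
 *	peer.addr.name.name.type = 1000;
 *	peer.addr.name.name.instance = 1;
 *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
 *		// with O_NONBLOCK set on sd, EINPROGRESS means the SYN was sent
 *		// and the connection completes (or fails) asynchronously
 *		if (errno != EINPROGRESS)
 *			... handle error ...
 *	}
 */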
1572
1573 /**
1574 * tipc_listen - allow socket to listen for incoming connections
1575 * @sock: socket structure
1576 * @len: (unused)
1577 *
1578 * Returns 0 on success, errno otherwise
1579 */
1580 static int tipc_listen(struct socket *sock, int len)
1581 {
1582 struct sock *sk = sock->sk;
1583 int res;
1584
1585 lock_sock(sk);
1586
1587 if (sock->state != SS_UNCONNECTED)
1588 res = -EINVAL;
1589 else {
1590 sock->state = SS_LISTENING;
1591 res = 0;
1592 }
1593
1594 release_sock(sk);
1595 return res;
1596 }
1597
1598 static int tipc_wait_for_accept(struct socket *sock, long timeo)
1599 {
1600 struct sock *sk = sock->sk;
1601 DEFINE_WAIT(wait);
1602 int err;
1603
1604 /* True wake-one mechanism for incoming connections: only
1605 * one process gets woken up, not the 'whole herd'.
1606 * Since we do not 'race & poll' for established sockets
1607 * anymore, the common case will execute the loop only once.
1608 */
1609 for (;;) {
1610 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1611 TASK_INTERRUPTIBLE);
1612 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1613 release_sock(sk);
1614 timeo = schedule_timeout(timeo);
1615 lock_sock(sk);
1616 }
1617 err = 0;
1618 if (!skb_queue_empty(&sk->sk_receive_queue))
1619 break;
1620 err = -EINVAL;
1621 if (sock->state != SS_LISTENING)
1622 break;
1623 err = sock_intr_errno(timeo);
1624 if (signal_pending(current))
1625 break;
1626 err = -EAGAIN;
1627 if (!timeo)
1628 break;
1629 }
1630 finish_wait(sk_sleep(sk), &wait);
1631 return err;
1632 }
1633
1634 /**
1635 * tipc_accept - wait for connection request
1636 * @sock: listening socket
1637 * @newsock: new socket that is to be connected
1638 * @flags: file-related flags associated with socket
1639 *
1640 * Returns 0 on success, errno otherwise
1641 */
1642 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1643 {
1644 struct sock *new_sk, *sk = sock->sk;
1645 struct sk_buff *buf;
1646 struct tipc_port *new_port;
1647 struct tipc_msg *msg;
1648 struct tipc_portid peer;
1649 u32 new_ref;
1650 long timeo;
1651 int res;
1652
1653 lock_sock(sk);
1654
1655 if (sock->state != SS_LISTENING) {
1656 res = -EINVAL;
1657 goto exit;
1658 }
1659 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1660 res = tipc_wait_for_accept(sock, timeo);
1661 if (res)
1662 goto exit;
1663
1664 buf = skb_peek(&sk->sk_receive_queue);
1665
1666 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
1667 if (res)
1668 goto exit;
1669
1670 new_sk = new_sock->sk;
1671 new_port = &tipc_sk(new_sk)->port;
1672 new_ref = new_port->ref;
1673 msg = buf_msg(buf);
1674
1675 /* we lock on new_sk; but lockdep sees the lock on sk */
1676 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
1677
1678 /*
1679 * Reject any stray messages received by new socket
1680 * before the socket lock was taken (very, very unlikely)
1681 */
1682 reject_rx_queue(new_sk);
1683
1684 /* Connect new socket to its peer */
1685 peer.ref = msg_origport(msg);
1686 peer.node = msg_orignode(msg);
1687 tipc_port_connect(new_ref, &peer);
1688 new_sock->state = SS_CONNECTED;
1689
1690 tipc_port_set_importance(new_port, msg_importance(msg));
1691 if (msg_named(msg)) {
1692 new_port->conn_type = msg_nametype(msg);
1693 new_port->conn_instance = msg_nameinst(msg);
1694 }
1695
1696 /*
1697 * Respond to 'SYN-' by discarding it & returning an 'ACK-'.
1698 * Respond to 'SYN+' by queuing it on new socket.
1699 */
1700 if (!msg_data_sz(msg)) {
1701 struct msghdr m = {NULL,};
1702
1703 advance_rx_queue(sk);
1704 tipc_send_packet(NULL, new_sock, &m, 0);
1705 } else {
1706 __skb_dequeue(&sk->sk_receive_queue);
1707 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1708 skb_set_owner_r(buf, new_sk);
1709 }
1710 release_sock(new_sk);
1711 exit:
1712 release_sock(sk);
1713 return res;
1714 }
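/*
 * Server-side user-space sketch combining bind(), listen() and accept() for
 * the flow implemented above. It assumes <sys/socket.h>, <string.h> and
 * <linux/tipc.h> are included; the published name range is an arbitrary
 * example.
 *
 *	struct sockaddr_tipc name;
 *	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *	int peer_sd;
 *
 *	memset(&name, 0, sizeof(name));
 *	name.family = AF_TIPC;
 *	name.addrtype = TIPC_ADDR_NAMESEQ;
 *	name.addr.nameseq.type = 1000;
 *	name.addr.nameseq.lower = 1;
 *	name.addr.nameseq.upper = 100;
 *	name.scope = TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));
 *	listen(sd, 0);                          // backlog length is ignored (see above)
 *	peer_sd = accept(sd, NULL, NULL);       // blocks until a SYN arrives
 */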
1715
1716 /**
1717 * tipc_shutdown - shutdown socket connection
1718 * @sock: socket structure
1719 * @how: direction to close (must be SHUT_RDWR)
1720 *
1721 * Terminates connection (if necessary), then purges socket's receive queue.
1722 *
1723 * Returns 0 on success, errno otherwise
1724 */
1725 static int tipc_shutdown(struct socket *sock, int how)
1726 {
1727 struct sock *sk = sock->sk;
1728 struct tipc_sock *tsk = tipc_sk(sk);
1729 struct tipc_port *port = &tsk->port;
1730 struct sk_buff *buf;
1731 int res;
1732
1733 if (how != SHUT_RDWR)
1734 return -EINVAL;
1735
1736 lock_sock(sk);
1737
1738 switch (sock->state) {
1739 case SS_CONNECTING:
1740 case SS_CONNECTED:
1741
1742 restart:
1743 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1744 buf = __skb_dequeue(&sk->sk_receive_queue);
1745 if (buf) {
1746 if (TIPC_SKB_CB(buf)->handle != NULL) {
1747 kfree_skb(buf);
1748 goto restart;
1749 }
1750 tipc_port_disconnect(port->ref);
1751 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
1752 } else {
1753 tipc_port_shutdown(port->ref);
1754 }
1755
1756 sock->state = SS_DISCONNECTING;
1757
1758 /* fall through */
1759
1760 case SS_DISCONNECTING:
1761
1762 /* Discard any unreceived messages */
1763 __skb_queue_purge(&sk->sk_receive_queue);
1764
1765 /* Wake up anyone sleeping in poll */
1766 sk->sk_state_change(sk);
1767 res = 0;
1768 break;
1769
1770 default:
1771 res = -ENOTCONN;
1772 }
1773
1774 release_sock(sk);
1775 return res;
1776 }
1777
1778 /**
1779 * tipc_setsockopt - set socket option
1780 * @sock: socket structure
1781 * @lvl: option level
1782 * @opt: option identifier
1783 * @ov: pointer to new option value
1784 * @ol: length of option value
1785 *
1786 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1787 * (to ease compatibility).
1788 *
1789 * Returns 0 on success, errno otherwise
1790 */
1791 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
1792 char __user *ov, unsigned int ol)
1793 {
1794 struct sock *sk = sock->sk;
1795 struct tipc_sock *tsk = tipc_sk(sk);
1796 struct tipc_port *port = &tsk->port;
1797 u32 value;
1798 int res;
1799
1800 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1801 return 0;
1802 if (lvl != SOL_TIPC)
1803 return -ENOPROTOOPT;
1804 if (ol < sizeof(value))
1805 return -EINVAL;
1806 res = get_user(value, (u32 __user *)ov);
1807 if (res)
1808 return res;
1809
1810 lock_sock(sk);
1811
1812 switch (opt) {
1813 case TIPC_IMPORTANCE:
1814 tipc_port_set_importance(port, value);
1815 break;
1816 case TIPC_SRC_DROPPABLE:
1817 if (sock->type != SOCK_STREAM)
1818 tipc_port_set_unreliable(port, value);
1819 else
1820 res = -ENOPROTOOPT;
1821 break;
1822 case TIPC_DEST_DROPPABLE:
1823 tipc_port_set_unreturnable(port, value);
1824 break;
1825 case TIPC_CONN_TIMEOUT:
1826 tipc_sk(sk)->conn_timeout = value;
1827 /* no need to set "res", since already 0 at this point */
1828 break;
1829 default:
1830 res = -EINVAL;
1831 }
1832
1833 release_sock(sk);
1834
1835 return res;
1836 }
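/*
 * User-space sketch of the options handled above. It assumes <sys/socket.h>
 * and <linux/tipc.h> are included and that sd is a TIPC socket descriptor.
 *
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *	__u32 tout = 10000;                     // connect timeout in milliseconds
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tout, sizeof(tout));
 *
 *	// on SOCK_STREAM sockets, IPPROTO_TCP options such as TCP_NODELAY are
 *	// accepted and silently ignored, for compatibility with TCP applications
 */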
1837
1838 /**
1839 * tipc_getsockopt - get socket option
1840 * @sock: socket structure
1841 * @lvl: option level
1842 * @opt: option identifier
1843 * @ov: receptacle for option value
1844 * @ol: receptacle for length of option value
1845 *
1846 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1847 * (to ease compatibility).
1848 *
1849 * Returns 0 on success, errno otherwise
1850 */
1851 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
1852 char __user *ov, int __user *ol)
1853 {
1854 struct sock *sk = sock->sk;
1855 struct tipc_sock *tsk = tipc_sk(sk);
1856 struct tipc_port *port = &tsk->port;
1857 int len;
1858 u32 value;
1859 int res;
1860
1861 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1862 return put_user(0, ol);
1863 if (lvl != SOL_TIPC)
1864 return -ENOPROTOOPT;
1865 res = get_user(len, ol);
1866 if (res)
1867 return res;
1868
1869 lock_sock(sk);
1870
1871 switch (opt) {
1872 case TIPC_IMPORTANCE:
1873 value = tipc_port_importance(port);
1874 break;
1875 case TIPC_SRC_DROPPABLE:
1876 value = tipc_port_unreliable(port);
1877 break;
1878 case TIPC_DEST_DROPPABLE:
1879 value = tipc_port_unreturnable(port);
1880 break;
1881 case TIPC_CONN_TIMEOUT:
1882 value = tipc_sk(sk)->conn_timeout;
1883 /* no need to set "res", since already 0 at this point */
1884 break;
1885 case TIPC_NODE_RECVQ_DEPTH:
1886 value = 0; /* was tipc_queue_size, now obsolete */
1887 break;
1888 case TIPC_SOCK_RECVQ_DEPTH:
1889 value = skb_queue_len(&sk->sk_receive_queue);
1890 break;
1891 default:
1892 res = -EINVAL;
1893 }
1894
1895 release_sock(sk);
1896
1897 if (res)
1898 return res; /* "get" failed */
1899
1900 if (len < sizeof(value))
1901 return -EINVAL;
1902
1903 if (copy_to_user(ov, &value, sizeof(value)))
1904 return -EFAULT;
1905
1906 return put_user(sizeof(value), ol);
1907 }
1908
1909 /* Protocol switches for the various types of TIPC sockets */
1910
1911 static const struct proto_ops msg_ops = {
1912 .owner = THIS_MODULE,
1913 .family = AF_TIPC,
1914 .release = tipc_release,
1915 .bind = tipc_bind,
1916 .connect = tipc_connect,
1917 .socketpair = sock_no_socketpair,
1918 .accept = sock_no_accept,
1919 .getname = tipc_getname,
1920 .poll = tipc_poll,
1921 .ioctl = sock_no_ioctl,
1922 .listen = sock_no_listen,
1923 .shutdown = tipc_shutdown,
1924 .setsockopt = tipc_setsockopt,
1925 .getsockopt = tipc_getsockopt,
1926 .sendmsg = tipc_sendmsg,
1927 .recvmsg = tipc_recvmsg,
1928 .mmap = sock_no_mmap,
1929 .sendpage = sock_no_sendpage
1930 };
1931
1932 static const struct proto_ops packet_ops = {
1933 .owner = THIS_MODULE,
1934 .family = AF_TIPC,
1935 .release = tipc_release,
1936 .bind = tipc_bind,
1937 .connect = tipc_connect,
1938 .socketpair = sock_no_socketpair,
1939 .accept = tipc_accept,
1940 .getname = tipc_getname,
1941 .poll = tipc_poll,
1942 .ioctl = sock_no_ioctl,
1943 .listen = tipc_listen,
1944 .shutdown = tipc_shutdown,
1945 .setsockopt = tipc_setsockopt,
1946 .getsockopt = tipc_getsockopt,
1947 .sendmsg = tipc_send_packet,
1948 .recvmsg = tipc_recvmsg,
1949 .mmap = sock_no_mmap,
1950 .sendpage = sock_no_sendpage
1951 };
1952
1953 static const struct proto_ops stream_ops = {
1954 .owner = THIS_MODULE,
1955 .family = AF_TIPC,
1956 .release = tipc_release,
1957 .bind = tipc_bind,
1958 .connect = tipc_connect,
1959 .socketpair = sock_no_socketpair,
1960 .accept = tipc_accept,
1961 .getname = tipc_getname,
1962 .poll = tipc_poll,
1963 .ioctl = sock_no_ioctl,
1964 .listen = tipc_listen,
1965 .shutdown = tipc_shutdown,
1966 .setsockopt = tipc_setsockopt,
1967 .getsockopt = tipc_getsockopt,
1968 .sendmsg = tipc_send_stream,
1969 .recvmsg = tipc_recv_stream,
1970 .mmap = sock_no_mmap,
1971 .sendpage = sock_no_sendpage
1972 };
1973
1974 static const struct net_proto_family tipc_family_ops = {
1975 .owner = THIS_MODULE,
1976 .family = AF_TIPC,
1977 .create = tipc_sk_create
1978 };
1979
1980 static struct proto tipc_proto = {
1981 .name = "TIPC",
1982 .owner = THIS_MODULE,
1983 .obj_size = sizeof(struct tipc_sock),
1984 .sysctl_rmem = sysctl_tipc_rmem
1985 };
1986
1987 static struct proto tipc_proto_kern = {
1988 .name = "TIPC",
1989 .obj_size = sizeof(struct tipc_sock),
1990 .sysctl_rmem = sysctl_tipc_rmem
1991 };
1992
1993 /**
1994 * tipc_socket_init - initialize TIPC socket interface
1995 *
1996 * Returns 0 on success, errno otherwise
1997 */
1998 int tipc_socket_init(void)
1999 {
2000 int res;
2001
2002 res = proto_register(&tipc_proto, 1);
2003 if (res) {
2004 pr_err("Failed to register TIPC protocol type\n");
2005 goto out;
2006 }
2007
2008 res = sock_register(&tipc_family_ops);
2009 if (res) {
2010 pr_err("Failed to register TIPC socket type\n");
2011 proto_unregister(&tipc_proto);
2012 goto out;
2013 }
2014 out:
2015 return res;
2016 }
2017
2018 /**
2019 * tipc_socket_stop - stop TIPC socket interface
2020 */
2021 void tipc_socket_stop(void)
2022 {
2023 sock_unregister(tipc_family_ops.family);
2024 proto_unregister(&tipc_proto);
2025 }