/* net/tipc/socket.c
 * (extraction residue: commit subject "tipc: name tipc name table support
 * net namespace", path deliverable/linux.git / net / tipc / socket.c)
 */
1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/rhashtable.h>
38 #include <linux/jhash.h>
39 #include "core.h"
40 #include "name_table.h"
41 #include "node.h"
42 #include "link.h"
43 #include "config.h"
44 #include "socket.h"
45
46 #define SS_LISTENING -1 /* socket is listening */
47 #define SS_READY -2 /* socket is connectionless */
48
49 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
50 #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
51 #define TIPC_FWD_MSG 1
52 #define TIPC_CONN_OK 0
53 #define TIPC_CONN_PROBING 1
54 #define TIPC_MAX_PORT 0xffffffff
55 #define TIPC_MIN_PORT 1
56
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @sock_list: adjacent sockets in TIPC's global list of sockets
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state: connection supervision state (TIPC_CONN_OK/TIPC_CONN_PROBING)
 * @probing_intv: interval between connection probes, in jiffies
 *                (presumably armed on @timer — set outside this view; verify)
 * @timer: timer used for connection probing (see tipc_sk_timeout)
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	int connected;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head sock_list;
	struct list_head publications;
	u32 pub_count;
	u32 probing_state;
	unsigned long probing_intv;
	struct timer_list timer;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool link_cong;
	uint sent_unacked;
	uint rcv_unacked;
	struct rhash_head node;
	struct rcu_head rcu;
};
106
107 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
108 static void tipc_data_ready(struct sock *sk);
109 static void tipc_write_space(struct sock *sk);
110 static int tipc_release(struct socket *sock);
111 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
112 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
113 static void tipc_sk_timeout(unsigned long data);
114 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
115 struct tipc_name_seq const *seq);
116 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
117 struct tipc_name_seq const *seq);
118 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
119 static int tipc_sk_insert(struct tipc_sock *tsk);
120 static void tipc_sk_remove(struct tipc_sock *tsk);
121
122 static const struct proto_ops packet_ops;
123 static const struct proto_ops stream_ops;
124 static const struct proto_ops msg_ops;
125
126 static struct proto tipc_proto;
127 static struct proto tipc_proto_kern;
128
/* Netlink attribute validation policy for TIPC socket-level attributes */
static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
	[TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
	[TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
	[TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
	[TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
};
136
137 /*
138 * Revised TIPC socket locking policy:
139 *
140 * Most socket operations take the standard socket lock when they start
141 * and hold it until they finish (or until they need to sleep). Acquiring
142 * this lock grants the owner exclusive access to the fields of the socket
143 * data structures, with the exception of the backlog queue. A few socket
144 * operations can be done without taking the socket lock because they only
145 * read socket information that never changes during the life of the socket.
146 *
147 * Socket operations may acquire the lock for the associated TIPC port if they
148 * need to perform an operation on the port. If any routine needs to acquire
149 * both the socket lock and the port lock it must take the socket lock first
150 * to avoid the risk of deadlock.
151 *
152 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it is invoked at the BH level and cannot block.
154 * Instead, it checks to see if the socket lock is currently owned by someone,
155 * and either handles the message itself or adds it to the socket's backlog
156 * queue; in the latter case the queued message is processed once the process
157 * owning the socket lock releases it.
158 *
159 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
160 * the problem of a blocked socket operation preventing any other operations
161 * from occurring. However, applications must be careful if they have
162 * multiple threads trying to send (or receive) on the same socket, as these
163 * operations might interfere with each other. For example, doing a connect
164 * and a receive at the same time might allow the receive to consume the
165 * ACK message meant for the connect. While additional work could be done
166 * to try and overcome this, it doesn't seem to be worthwhile at the present.
167 *
168 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
169 * that another operation that must be performed in a non-blocking manner is
170 * not delayed for very long because the lock has already been taken.
171 *
172 * NOTE: This code assumes that certain fields of a port/socket pair are
173 * constant over its lifetime; such fields can be examined without taking
174 * the socket lock and/or port lock, and do not need to be re-read even
175 * after resuming processing after waiting. These fields include:
176 * - socket type
177 * - pointer to socket sk structure (aka tipc_sock structure)
178 * - pointer to port structure
179 * - port reference
180 */
181
182 static u32 tsk_peer_node(struct tipc_sock *tsk)
183 {
184 return msg_destnode(&tsk->phdr);
185 }
186
187 static u32 tsk_peer_port(struct tipc_sock *tsk)
188 {
189 return msg_destport(&tsk->phdr);
190 }
191
192 static bool tsk_unreliable(struct tipc_sock *tsk)
193 {
194 return msg_src_droppable(&tsk->phdr) != 0;
195 }
196
197 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
198 {
199 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
200 }
201
202 static bool tsk_unreturnable(struct tipc_sock *tsk)
203 {
204 return msg_dest_droppable(&tsk->phdr) != 0;
205 }
206
207 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
208 {
209 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
210 }
211
212 static int tsk_importance(struct tipc_sock *tsk)
213 {
214 return msg_importance(&tsk->phdr);
215 }
216
217 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
218 {
219 if (imp > TIPC_CRITICAL_IMPORTANCE)
220 return -EINVAL;
221 msg_set_importance(&tsk->phdr, (u32)imp);
222 return 0;
223 }
224
/* tipc_sk - cast a generic sock pointer to its enclosing tipc_sock */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}
229
/* tsk_conn_cong - non-zero once the unacked-message send window is full */
static int tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
}
234
235 /**
236 * tsk_advance_rx_queue - discard first buffer in socket receive queue
237 *
238 * Caller must hold socket lock
239 */
240 static void tsk_advance_rx_queue(struct sock *sk)
241 {
242 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
243 }
244
245 /**
246 * tsk_rej_rx_queue - reject all buffers in socket receive queue
247 *
248 * Caller must hold socket lock
249 */
250 static void tsk_rej_rx_queue(struct sock *sk)
251 {
252 struct sk_buff *skb;
253 u32 dnode;
254
255 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
256 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
257 tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
258 }
259 }
260
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	/* No peer exists unless the socket is connected */
	if (unlikely(!tsk->connected))
		return false;

	/* Origin port must match the recorded peer port */
	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	/* Either side may still carry the pre-configuration address <0.0.0>
	 * while the other already uses this node's own address.
	 */
	if (!orig_node && (peer_node == tipc_own_addr))
		return true;

	if (!peer_node && (orig_node == tipc_own_addr))
		return true;

	return false;
}
292
293 /**
294 * tipc_sk_create - create a TIPC socket
295 * @net: network namespace (must be default network)
296 * @sock: pre-allocated socket structure
297 * @protocol: protocol indicator (must be 0)
298 * @kern: caused by kernel or by userspace?
299 *
300 * This routine creates additional data structures used by the TIPC socket,
301 * initializes them, and links them together.
302 *
303 * Returns 0 on success, errno otherwise
304 */
305 static int tipc_sk_create(struct net *net, struct socket *sock,
306 int protocol, int kern)
307 {
308 const struct proto_ops *ops;
309 socket_state state;
310 struct sock *sk;
311 struct tipc_sock *tsk;
312 struct tipc_msg *msg;
313
314 /* Validate arguments */
315 if (unlikely(protocol != 0))
316 return -EPROTONOSUPPORT;
317
318 switch (sock->type) {
319 case SOCK_STREAM:
320 ops = &stream_ops;
321 state = SS_UNCONNECTED;
322 break;
323 case SOCK_SEQPACKET:
324 ops = &packet_ops;
325 state = SS_UNCONNECTED;
326 break;
327 case SOCK_DGRAM:
328 case SOCK_RDM:
329 ops = &msg_ops;
330 state = SS_READY;
331 break;
332 default:
333 return -EPROTOTYPE;
334 }
335
336 /* Allocate socket's protocol area */
337 if (!kern)
338 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
339 else
340 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
341
342 if (sk == NULL)
343 return -ENOMEM;
344
345 tsk = tipc_sk(sk);
346 tsk->max_pkt = MAX_PKT_DEFAULT;
347 INIT_LIST_HEAD(&tsk->publications);
348 msg = &tsk->phdr;
349 tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
350 NAMED_H_SIZE, 0);
351
352 /* Finish initializing socket data structures */
353 sock->ops = ops;
354 sock->state = state;
355 sock_init_data(sock, sk);
356 if (tipc_sk_insert(tsk)) {
357 pr_warn("Socket create failed; port numbrer exhausted\n");
358 return -EINVAL;
359 }
360 msg_set_origport(msg, tsk->portid);
361 setup_timer(&tsk->timer, tipc_sk_timeout, (unsigned long)tsk);
362 sk->sk_backlog_rcv = tipc_backlog_rcv;
363 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
364 sk->sk_data_ready = tipc_data_ready;
365 sk->sk_write_space = tipc_write_space;
366 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
367 tsk->sent_unacked = 0;
368 atomic_set(&tsk->dupl_rcvcnt, 0);
369
370 if (sock->state == SS_READY) {
371 tsk_set_unreturnable(tsk, true);
372 if (sock->type == SOCK_DGRAM)
373 tsk_set_unreliable(tsk, true);
374 }
375 return 0;
376 }
377
378 /**
379 * tipc_sock_create_local - create TIPC socket from inside TIPC module
380 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
381 *
382 * We cannot use sock_creat_kern here because it bumps module user count.
383 * Since socket owner and creator is the same module we must make sure
384 * that module count remains zero for module local sockets, otherwise
385 * we cannot do rmmod.
386 *
387 * Returns 0 on success, errno otherwise
388 */
389 int tipc_sock_create_local(int type, struct socket **res)
390 {
391 int rc;
392
393 rc = sock_create_lite(AF_TIPC, type, 0, res);
394 if (rc < 0) {
395 pr_err("Failed to create kernel socket\n");
396 return rc;
397 }
398 tipc_sk_create(&init_net, *res, 0, 1);
399
400 return 0;
401 }
402
/**
 * tipc_sock_release_local - release socket created by tipc_sock_create_local
 * @sock: the socket to be released.
 *
 * Module reference count is not incremented when such sockets are created,
 * so we must keep it from being decremented when they are released.
 */
void tipc_sock_release_local(struct socket *sock)
{
	tipc_release(sock);
	/* Clearing ops keeps sock_release() from dropping a module
	 * reference that was never taken for this internal socket.
	 */
	sock->ops = NULL;
	sock_release(sock);
}
416
/**
 * tipc_sock_accept_local - accept a connection on a socket created
 * with tipc_sock_create_local. Use this function to avoid that
 * module reference count is inadvertently incremented.
 *
 * @sock: the accepting socket
 * @newsock: reference to the new socket to be created
 * @flags: socket flags
 */

int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
			   int flags)
{
	struct sock *sk = sock->sk;
	int ret;

	/* Allocate the child socket without bumping the module count */
	ret = sock_create_lite(sk->sk_family, sk->sk_type,
			       sk->sk_protocol, newsock);
	if (ret < 0)
		return ret;

	ret = tipc_accept(sock, *newsock, flags);
	if (ret < 0) {
		sock_release(*newsock);
		return ret;
	}
	/* Child inherits the listener's ops */
	(*newsock)->ops = sock->ops;
	return ret;
}
446
/* tipc_sk_callback - RCU callback scheduled by tipc_release(); drops the
 * final socket reference after the grace period has elapsed.
 */
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
453
454 /**
455 * tipc_release - destroy a TIPC socket
456 * @sock: socket to destroy
457 *
458 * This routine cleans up any messages that are still queued on the socket.
459 * For DGRAM and RDM socket types, all queued messages are rejected.
460 * For SEQPACKET and STREAM socket types, the first message is rejected
461 * and any others are discarded. (If the first message on a STREAM socket
462 * is partially-read, it is discarded and the next one is rejected instead.)
463 *
464 * NOTE: Rejected messages are not necessarily returned to the sender! They
465 * are returned or discarded according to the "destination droppable" setting
466 * specified for the message by the sender.
467 *
468 * Returns 0 on success, errno otherwise
469 */
470 static int tipc_release(struct socket *sock)
471 {
472 struct sock *sk = sock->sk;
473 struct net *net = sock_net(sk);
474 struct tipc_sock *tsk;
475 struct sk_buff *skb;
476 u32 dnode, probing_state;
477
478 /*
479 * Exit if socket isn't fully initialized (occurs when a failed accept()
480 * releases a pre-allocated child socket that was never used)
481 */
482 if (sk == NULL)
483 return 0;
484
485 tsk = tipc_sk(sk);
486 lock_sock(sk);
487
488 /*
489 * Reject all unreceived messages, except on an active connection
490 * (which disconnects locally & sends a 'FIN+' to peer)
491 */
492 dnode = tsk_peer_node(tsk);
493 while (sock->state != SS_DISCONNECTING) {
494 skb = __skb_dequeue(&sk->sk_receive_queue);
495 if (skb == NULL)
496 break;
497 if (TIPC_SKB_CB(skb)->handle != NULL)
498 kfree_skb(skb);
499 else {
500 if ((sock->state == SS_CONNECTING) ||
501 (sock->state == SS_CONNECTED)) {
502 sock->state = SS_DISCONNECTING;
503 tsk->connected = 0;
504 tipc_node_remove_conn(net, dnode, tsk->portid);
505 }
506 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
507 tipc_link_xmit_skb(net, skb, dnode, 0);
508 }
509 }
510
511 tipc_sk_withdraw(tsk, 0, NULL);
512 probing_state = tsk->probing_state;
513 if (del_timer_sync(&tsk->timer) && probing_state != TIPC_CONN_PROBING)
514 sock_put(sk);
515 tipc_sk_remove(tsk);
516 if (tsk->connected) {
517 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
518 SHORT_H_SIZE, 0, dnode, tipc_own_addr,
519 tsk_peer_port(tsk),
520 tsk->portid, TIPC_ERR_NO_PORT);
521 if (skb)
522 tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
523 tipc_node_remove_conn(net, dnode, tsk->portid);
524 }
525
526 /* Discard any remaining (connection-based) messages in receive queue */
527 __skb_queue_purge(&sk->sk_receive_queue);
528
529 /* Reject any messages that accumulated in backlog queue */
530 sock->state = SS_DISCONNECTING;
531 release_sock(sk);
532
533 call_rcu(&tsk->rcu, tipc_sk_callback);
534 sock->sk = NULL;
535
536 return 0;
537 }
538
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name.  Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: The socket lock is held across the publish/withdraw operation.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	/* Zero-length address means "withdraw all published names" */
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* A single name is handled as a sequence of length one */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* Reserved name types are only bindable by the topology and
	 * configuration services.
	 */
	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	/* Positive scope publishes; negative scope withdraws */
	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
598
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		/* A peer ID is only available on a live connection, unless
		 * the caller accepts a former peer (peer == 2) while the
		 * socket is disconnecting.
		 */
		if ((sock->state != SS_CONNECTED) &&
		    ((peer != 2) || (sock->state != SS_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return 0;
}
638
/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table, passed through to sock_poll_wait()
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
 *			POLLOUT if port is not congested
 *
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *			no write flags
 *
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 *
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *			no write flags
 *
 * listening		POLLIN if SYN in rx queue
 *			no write flags
 *
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	switch ((int)sock->state) {
	case SS_UNCONNECTED:
		if (!tsk->link_cong)
			mask |= POLLOUT;
		break;
	case SS_READY:
	case SS_CONNECTED:
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall thru' - readability check applies to these states too */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}
710
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dsz, long timeo)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
	struct sk_buff_head head;
	uint mtu;
	int rc;

	/* Prepare the preformatted header for a cluster-scope multicast */
	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

new_mtu:
	/* (Re)build the fragment chain against the current broadcast MTU */
	mtu = tipc_bclink_get_mtu();
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
	if (unlikely(rc < 0))
		return rc;

	do {
		rc = tipc_bclink_xmit(net, &head);
		if (likely(rc >= 0)) {
			rc = dsz;
			break;
		}
		/* -EMSGSIZE: MTU apparently changed, rebuild fragments */
		if (rc == -EMSGSIZE)
			goto new_mtu;
		if (rc != -ELINKCONG)
			break;
		/* Broadcast link congested: sleep until it clears */
		tipc_sk(sk)->link_cong = 1;
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
	return rc;
}
765
766 /* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
767 */
768 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff *buf)
769 {
770 struct tipc_msg *msg = buf_msg(buf);
771 struct tipc_port_list dports = {0, NULL, };
772 struct tipc_port_list *item;
773 struct sk_buff *b;
774 uint i, last, dst = 0;
775 u32 scope = TIPC_CLUSTER_SCOPE;
776
777 if (in_own_node(msg_orignode(msg)))
778 scope = TIPC_NODE_SCOPE;
779
780 /* Create destination port list: */
781 tipc_nametbl_mc_translate(net, msg_nametype(msg), msg_namelower(msg),
782 msg_nameupper(msg), scope, &dports);
783 last = dports.count;
784 if (!last) {
785 kfree_skb(buf);
786 return;
787 }
788
789 for (item = &dports; item; item = item->next) {
790 for (i = 0; i < PLSIZE && ++dst <= last; i++) {
791 b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
792 if (!b) {
793 pr_warn("Failed do clone mcast rcv buffer\n");
794 continue;
795 }
796 msg_set_destport(msg, item->ports[i]);
797 tipc_sk_rcv(net, b);
798 }
799 }
800 tipc_port_list_free(&dports);
801 }
802
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @dnode: node to send response message to, if any
 * @buf: buffer containing protocol message
 * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
 * (CONN_PROBE_REPLY) message should be forwarded.
 */
static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
			     struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	int conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, msg))
		goto exit;

	/* Any valid peer message proves the connection is alive */
	tsk->probing_state = TIPC_CONN_OK;

	if (msg_type(msg) == CONN_ACK) {
		/* Peer acked messages: shrink the unacked window and wake
		 * a sender that was blocked on connection congestion.
		 */
		conn_cong = tsk_conn_cong(tsk);
		tsk->sent_unacked -= msg_msgcnt(msg);
		if (conn_cong)
			tsk->sk.sk_write_space(&tsk->sk);
	} else if (msg_type(msg) == CONN_PROBE) {
		/* Reverse the probe into a reply and ask caller to forward */
		if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
			return TIPC_OK;
		msg_set_type(msg, CONN_PROBE_REPLY);
		return TIPC_FWD_MSG;
	}
	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
	kfree_skb(buf);
	return TIPC_OK;
}
839
/* tipc_wait_for_sndmsg - block the sender until link congestion clears,
 * the timeout expires, a signal arrives, or the socket disconnects.
 *
 * Returns 0 once congestion has cleared, errno otherwise.
 */
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		/* Sleep until link_cong is cleared by a wakeup elsewhere */
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
864
/**
 * tipc_sendmsg - send message in connectionless manner
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	u32 dnode, dport;
	struct sk_buff_head head;
	struct sk_buff *skb;
	struct tipc_name_seq *seq = &dest->addr.nameseq;
	u32 mtu;
	long timeo;
	int rc;

	if (unlikely(!dest))
		return -EDESTADDRREQ;

	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;

	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	/* Connection-oriented sockets may only use this path for a 'SYN' */
	if (unlikely(sock->state != SS_READY)) {
		if (sock->state == SS_LISTENING) {
			rc = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			rc = -EISCONN;
			goto exit;
		}
		if (tsk->published) {
			rc = -EOPNOTSUPP;
			goto exit;
		}
		/* Remember type/instance for later connection queries */
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	/* Fill in the header according to the destination address type */
	if (dest->addrtype == TIPC_ADDR_MCAST) {
		rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
		goto exit;
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		dnode = domain;
		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
		/* Resolve the name to a concrete <node, port> */
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
		if (unlikely(!dport && !dnode)) {
			rc = -EHOSTUNREACH;
			goto exit;
		}
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

new_mtu:
	/* (Re)fragment against the current MTU toward the destination node */
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
	if (rc < 0)
		goto exit;

	do {
		skb = skb_peek(&head);
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
		rc = tipc_link_xmit(net, &head, dnode, tsk->portid);
		if (likely(rc >= 0)) {
			/* A sent 'SYN' moves the socket to CONNECTING */
			if (sock->state != SS_READY)
				sock->state = SS_CONNECTING;
			rc = dsz;
			break;
		}
		if (rc == -EMSGSIZE)
			goto new_mtu;
		if (rc != -ELINKCONG)
			break;
		/* Link congested: sleep, then retry the whole chain */
		tsk->link_cong = 1;
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
exit:
	if (iocb)
		release_sock(sk);

	return rc;
}
991
/* tipc_wait_for_sndpkt - block a connected sender until both link and
 * connection congestion clear, or the connection/timeout/signal ends
 * the wait.
 *
 * Returns 0 when the sender may proceed, errno otherwise.
 */
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		else if (sock->state != SS_CONNECTED)
			return -ENOTCONN;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		/* Wake when both congestion conditions clear, or when the
		 * connection is torn down underneath us.
		 */
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p,
				     (!tsk->link_cong &&
				      !tsk_conn_cong(tsk)) ||
				     !tsk->connected);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
1021
/**
 * tipc_send_stream - send stream-oriented data
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head head;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	u32 portid = tsk->portid;
	int rc = -EINVAL;
	long timeo;
	u32 dnode;
	uint mtu, send, sent = 0;

	/* Handle implied connection establishment */
	if (unlikely(dest)) {
		rc = tipc_sendmsg(iocb, sock, m, dsz);
		/* Mark the implicit SYN as unacked if it carried all data */
		if (dsz && (dsz == rc))
			tsk->sent_unacked = 1;
		return rc;
	}
	/* Return value is an int, so cap the total transfer size */
	if (dsz > (uint)INT_MAX)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
			rc = -EPIPE;
		else
			rc = -ENOTCONN;
		goto exit;
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	dnode = tsk_peer_node(tsk);

next:
	/* Build the next chunk, bounded by both MTU and max message size */
	mtu = tsk->max_pkt;
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
	if (unlikely(rc < 0))
		goto exit;
	do {
		if (likely(!tsk_conn_cong(tsk))) {
			rc = tipc_link_xmit(net, &head, dnode, portid);
			if (likely(!rc)) {
				tsk->sent_unacked++;
				sent += send;
				if (sent == dsz)
					break;
				goto next;
			}
			/* -EMSGSIZE: cached MTU was stale; refresh and
			 * rebuild the chunk with the new value.
			 */
			if (rc == -EMSGSIZE) {
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
								 portid);
				goto next;
			}
			if (rc != -ELINKCONG)
				break;
			tsk->link_cong = 1;
		}
		/* Congested: block until it clears, then retry same chunk */
		rc = tipc_wait_for_sndpkt(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
exit:
	if (iocb)
		release_sock(sk);
	/* Partial success reports bytes sent rather than the error */
	return sent ? sent : rc;
}
1108
1109 /**
1110 * tipc_send_packet - send a connection-oriented message
1111 * @iocb: if NULL, indicates that socket lock is already held
1112 * @sock: socket structure
1113 * @m: message to send
1114 * @dsz: length of data to be transmitted
1115 *
1116 * Used for SOCK_SEQPACKET messages.
1117 *
1118 * Returns the number of bytes sent on success, or errno otherwise
1119 */
1120 static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
1121 struct msghdr *m, size_t dsz)
1122 {
1123 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1124 return -EMSGSIZE;
1125
1126 return tipc_send_stream(iocb, sock, m, dsz);
1127 }
1128
/* tipc_sk_finish_conn - complete the setup of a connection
 * @tsk: socket being connected
 * @peer_port: port id of the peer socket
 * @peer_node: network address of the peer node
 *
 * Caller must hold the socket lock. Primes the socket's message header
 * template for connection-oriented traffic and arms the probing timer.
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	tsk->probing_intv = CONN_PROBING_INTERVAL;
	tsk->probing_state = TIPC_CONN_OK;
	tsk->connected = 1;
	/* Timer was idle: take a socket reference for it; the matching
	 * sock_put() is done in tipc_sk_timeout().
	 */
	if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv))
		sock_hold(&tsk->sk);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
}
1151
1152 /**
1153 * set_orig_addr - capture sender's address for received message
1154 * @m: descriptor for message info
1155 * @msg: received message header
1156 *
1157 * Note: Address is not captured if not requested by receiver.
1158 */
1159 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1160 {
1161 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1162
1163 if (addr) {
1164 addr->family = AF_TIPC;
1165 addr->addrtype = TIPC_ADDR_ID;
1166 memset(&addr->addr, 0, sizeof(addr->addr));
1167 addr->addr.id.ref = msg_origport(msg);
1168 addr->addr.id.node = msg_orignode(msg);
1169 addr->addr.name.domain = 0; /* could leave uninitialized */
1170 addr->scope = 0; /* could leave uninitialized */
1171 m->msg_namelen = sizeof(struct sockaddr_tipc);
1172 }
1173 }
1174
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];	/* scratch triple for cmsg payloads */
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	/* Receiver supplied no control buffer: nothing to capture */
	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		/* TIPC_ERRINFO carries two u32s: error code + data length */
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		/* Single name: lower bound doubles as the upper bound */
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		/* Use the name captured at connection setup, if any */
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		/* TIPC_DESTNAME carries three u32s: type, lower, upper */
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
1245
/* tipc_sk_send_ack - send a CONN_ACK message back to the peer
 * @tsk: connected socket acknowledging received messages
 * @ack: number of messages being acknowledged
 *
 * Best effort: silently returns if the socket is not connected or if
 * the ack message cannot be allocated.
 */
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
{
	struct net *net = sock_net(&tsk->sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tsk->connected)
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
			      tipc_own_addr, peer_port, tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_msgcnt(msg, ack);
	tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
1264
/* tipc_wait_for_rcvmsg - wait for data to arrive at the socket
 * @sock: socket structure
 * @timeop: in/out pointer to remaining timeout, in jiffies
 *
 * Caller must hold the socket lock; it is dropped while sleeping.
 *
 * Returns 0 when the receive queue is non-empty, or a negative errno:
 * -ENOTCONN (disconnecting), signal code, or -EAGAIN (timeout expired).
 */
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sock->state == SS_DISCONNECTING) {
				err = -ENOTCONN;
				break;
			}
			/* Drop the socket lock while sleeping so the rx
			 * path can deliver into the receive queue.
			 */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	/* Report back how much of the timeout remains */
	*timeop = timeo;
	return err;
}
1297
/**
 * tipc_recvmsg - receive packet-oriented message
 * @iocb: (unused)
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		/* Errored message: report EOF (0) for a clean shutdown or
		 * when the caller asked for ancillary error info; otherwise
		 * surface a connection reset.
		 */
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		/* Connection-oriented sockets ack periodically for
		 * flow control purposes.
		 */
		if ((sock->state != SS_READY) &&
		    (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
			tsk->rcv_unacked = 0;
		}
		tsk_advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
1392
/**
 * tipc_recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		/* handle stores the byte offset already consumed from this
		 * message by a previous partial read
		 */
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
					    m, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		/* Message only partially consumed: remember the new offset
		 * in the skb so the next read resumes where this one stopped
		 */
		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
			tsk->rcv_unacked = 0;
		}
		tsk_advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
1511
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 *
 * Installed as sk->sk_write_space; wakes senders blocked in poll/select
 * waiting for the socket to become writable.
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	/* sk_wq is RCU protected */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}
1527
/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 *
 * Installed as sk->sk_data_ready; wakes readers blocked in poll/select
 * waiting for incoming data.
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	/* sk_wq is RCU protected */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}
1544
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @buf: in/out pointer to the message buffer; may be set to NULL when the
 *       message is consumed here (empty 'ACK-')
 *
 * Called with socket lock held. Drives the socket state machine for
 * connection setup and teardown.
 *
 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
 */
static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(*buf);
	int retval = -TIPC_ERR_NO_PORT;

	/* Multicast never belongs on a connection-based socket */
	if (msg_mcast(msg))
		return retval;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		/* Accept only connection-based messages sent by peer */
		if (tsk_peer_msg(tsk, msg)) {
			if (unlikely(msg_errcode(msg))) {
				sock->state = SS_DISCONNECTING;
				tsk->connected = 0;
				/* let timer expire on it's own */
				tipc_node_remove_conn(net, tsk_peer_node(tsk),
						      tsk->portid);
			}
			retval = TIPC_OK;
		}
		break;
	case SS_CONNECTING:
		/* Accept only ACK or NACK message */

		if (unlikely(!msg_connected(msg)))
			break;

		/* NACK: peer refused the connection */
		if (unlikely(msg_errcode(msg))) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = ECONNREFUSED;
			retval = TIPC_OK;
			break;
		}

		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = EINVAL;
			retval = TIPC_OK;
			break;
		}

		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
		msg_set_importance(&tsk->phdr, msg_importance(msg));
		sock->state = SS_CONNECTED;

		/* If an incoming message is an 'ACK-', it should be
		 * discarded here because it doesn't contain useful
		 * data. In addition, we should try to wake up
		 * connect() routine if sleeping.
		 */
		if (msg_data_sz(msg) == 0) {
			kfree_skb(*buf);
			*buf = NULL;
			if (waitqueue_active(sk_sleep(sk)))
				wake_up_interruptible(sk_sleep(sk));
		}
		retval = TIPC_OK;
		break;
	case SS_LISTENING:
	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(msg) && !(msg_errcode(msg)))
			retval = TIPC_OK;
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
	return retval;
}
1627
1628 /**
1629 * rcvbuf_limit - get proper overload limit of socket receive queue
1630 * @sk: socket
1631 * @buf: message
1632 *
1633 * For all connection oriented messages, irrespective of importance,
1634 * the default overload value (i.e. 67MB) is set as limit.
1635 *
1636 * For all connectionless messages, by default new queue limits are
1637 * as belows:
1638 *
1639 * TIPC_LOW_IMPORTANCE (4 MB)
1640 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1641 * TIPC_HIGH_IMPORTANCE (16 MB)
1642 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1643 *
1644 * Returns overload limit according to corresponding message importance
1645 */
1646 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1647 {
1648 struct tipc_msg *msg = buf_msg(buf);
1649
1650 if (msg_connected(msg))
1651 return sysctl_tipc_rmem[2];
1652
1653 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1654 msg_importance(msg);
1655 }
1656
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
 * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
 */
static int filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit = rcvbuf_limit(sk, buf);
	u32 onode;
	int rc = TIPC_OK;

	/* Protocol messages are handled by the connection manager */
	if (unlikely(msg_user(msg) == CONN_MANAGER))
		return tipc_sk_proto_rcv(tsk, &onode, buf);

	/* Link congestion has cleared: wake up blocked senders */
	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
		kfree_skb(buf);
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
		return TIPC_OK;
	}

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return -TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		/* Connectionless socket: refuse connection-based traffic */
		if (msg_connected(msg))
			return -TIPC_ERR_NO_PORT;
	} else {
		rc = filter_connect(tsk, &buf);
		if (rc != TIPC_OK || buf == NULL)
			return rc;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
		return -TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(buf)->handle = NULL;	/* no bytes consumed yet */
	__skb_queue_tail(&sk->sk_receive_queue, buf);
	skb_set_owner_r(buf, sk);

	sk->sk_data_ready(sk);
	return TIPC_OK;
}
1714
/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock, but not port lock.
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc;
	u32 onode;
	struct tipc_sock *tsk = tipc_sk(sk);
	/* filter_rcv() may consume skb, so sample truesize first */
	uint truesize = skb->truesize;

	rc = filter_rcv(sk, skb);

	if (likely(!rc)) {
		/* Account accepted backlog bytes so tipc_sk_rcv() can
		 * grow the backlog limit accordingly.
		 */
		if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
			atomic_add(truesize, &tsk->dupl_rcvcnt);
		return 0;
	}

	/* Rejected: try to bounce the message back to its origin */
	if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
		return 0;

	tipc_link_xmit_skb(sock_net(sk), skb, onode, 0);

	return 0;
}
1746
/**
 * tipc_sk_rcv - handle incoming message
 * @net: network namespace the message arrived in
 * @skb: buffer containing arriving message
 * Consumes buffer
 * Returns 0 if success, or errno: -EHOSTUNREACH
 */
int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
{
	struct tipc_sock *tsk;
	struct sock *sk;
	u32 dport = msg_destport(buf_msg(skb));
	int rc = TIPC_OK;
	uint limit;
	u32 dnode;

	/* Validate destination and message */
	tsk = tipc_sk_lookup(net, dport);
	if (unlikely(!tsk)) {
		/* No local socket: see if the message can be re-routed */
		rc = tipc_msg_eval(net, skb, &dnode);
		goto exit;
	}
	sk = &tsk->sk;

	/* Queue message */
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sock_owned_by_user(sk)) {
		rc = filter_rcv(sk, skb);
	} else {
		/* Owner busy: defer to backlog; limit grows with the
		 * bytes already accepted from the backlog (dupl_rcvcnt).
		 */
		if (sk->sk_backlog.len == 0)
			atomic_set(&tsk->dupl_rcvcnt, 0);
		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
		if (sk_add_backlog(sk, skb, limit))
			rc = -TIPC_ERR_OVERLOAD;
	}
	spin_unlock_bh(&sk->sk_lock.slock);
	sock_put(sk);	/* release ref taken by tipc_sk_lookup() */
	if (likely(!rc))
		return 0;
exit:
	/* Reject path: reverse the message and send it back, if possible */
	if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
		return -EHOSTUNREACH;

	tipc_link_xmit_skb(net, skb, dnode, 0);
	return (rc < 0) ? -EHOSTUNREACH : 0;
}
1793
/* tipc_wait_for_connect - wait for a pending connection attempt to resolve
 * @sock: socket structure
 * @timeo_p: in/out pointer to remaining timeout, in jiffies
 *
 * Caller must hold the socket lock.
 *
 * Returns 0 once the socket has left SS_CONNECTING, or a negative errno:
 * pending socket error, -ETIMEDOUT, or the signal code from
 * sock_intr_errno().
 */
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
1815
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	/* Non-blocking connect gets a zero timeout */
	long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
	socket_state previous;
	int res;

	lock_sock(sk);

	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
	if (sock->state == SS_READY) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	previous = sock->state;
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = tipc_sendmsg(NULL, sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case SS_CONNECTING:
		if (previous == SS_CONNECTING)
			res = -EALREADY;
		if (!timeout)
			goto exit;
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case SS_CONNECTED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
		break;
	}
exit:
	release_sock(sk);
	return res;
}
1896
1897 /**
1898 * tipc_listen - allow socket to listen for incoming connections
1899 * @sock: socket structure
1900 * @len: (unused)
1901 *
1902 * Returns 0 on success, errno otherwise
1903 */
1904 static int tipc_listen(struct socket *sock, int len)
1905 {
1906 struct sock *sk = sock->sk;
1907 int res;
1908
1909 lock_sock(sk);
1910
1911 if (sock->state != SS_UNCONNECTED)
1912 res = -EINVAL;
1913 else {
1914 sock->state = SS_LISTENING;
1915 res = 0;
1916 }
1917
1918 release_sock(sk);
1919 return res;
1920 }
1921
/* tipc_wait_for_accept - wait for an incoming connection request (SYN)
 * @sock: listening socket
 * @timeo: timeout, in jiffies
 *
 * Caller must hold the socket lock; it is dropped while sleeping.
 *
 * Returns 0 when the receive queue is non-empty, or a negative errno:
 * -EINVAL (no longer listening), signal code, or -EAGAIN (timed out).
 */
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EINVAL;
		if (sock->state != SS_LISTENING)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
1957
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	/* The queued message is the connection request (SYN) */
	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
	if (res)
		goto exit;

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to it's peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
	new_sock->state = SS_CONNECTED;

	/* Inherit importance and (optional) name from the SYN */
	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		tipc_send_packet(NULL, new_sock, &m, 0);
	} else {
		/* Move the data-carrying SYN over to the new socket */
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}
2034
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	u32 dnode;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

restart:
		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb) {
			/* Partially-read message: discard it and retry with
			 * the next one (handle != NULL marks consumed bytes)
			 */
			if (TIPC_SKB_CB(skb)->handle != NULL) {
				kfree_skb(skb);
				goto restart;
			}
			/* Reuse the unread message as 'FIN+' back to peer */
			if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
				tipc_link_xmit_skb(net, skb, dnode,
						   tsk->portid);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		} else {
			/* Empty queue: build a fresh 'FIN-' message */
			dnode = tsk_peer_node(tsk);
			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
					      TIPC_CONN_MSG, SHORT_H_SIZE,
					      0, dnode, tipc_own_addr,
					      tsk_peer_port(tsk),
					      tsk->portid, TIPC_CONN_SHUTDOWN);
			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
		}
		tsk->connected = 0;
		sock->state = SS_DISCONNECTING;
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
2105
/* tipc_sk_timeout - connection probing timer callback
 * @data: the tipc_sock whose timer fired
 *
 * If the previous probe went unanswered, abort the connection by sending
 * a self-addressed error message; otherwise send a new probe to the peer
 * and rearm the timer. The final sock_put() balances the sock_hold()
 * taken when the timer was armed (see tipc_sk_finish_conn()).
 */
static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;

	bh_lock_sock(sk);
	if (!tsk->connected) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probing_state == TIPC_CONN_PROBING) {
		/* Previous probe not answered -> self abort */
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr,
				      peer_node, tsk->portid, peer_port,
				      TIPC_ERR_NO_PORT);
	} else {
		skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
				      0, peer_node, tipc_own_addr,
				      peer_port, tsk->portid, TIPC_OK);
		tsk->probing_state = TIPC_CONN_PROBING;
		/* Rearmed an idle timer: take a reference for it again */
		if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	/* Transmit outside the bh lock */
	if (skb)
		tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
exit:
	sock_put(sk);
}
2141
/* tipc_sk_publish - bind a name sequence to this socket
 * @tsk: socket doing the publication
 * @scope: publication scope (zone/cluster/node)
 * @seq: name sequence (type, lower, upper) to publish
 *
 * Caller must hold the socket lock. A connected socket may not publish.
 *
 * Returns 0 on success, -EINVAL or -EADDRINUSE otherwise.
 */
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	u32 key;

	if (tsk->connected)
		return -EINVAL;
	/* Derive a per-publication key; must not collide with the port id */
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}
2165
/* tipc_sk_withdraw - unbind name sequence(s) from the socket
 * @tsk:   socket whose publications are withdrawn
 * @scope: scope to match (only consulted when @seq is non-NULL)
 * @seq:   specific name sequence to withdraw, or NULL to withdraw all
 *
 * Returns 0 if at least one publication was withdrawn, -EINVAL otherwise.
 */
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			/* NOTE(review): an upper-bound mismatch aborts the
			 * whole scan instead of skipping this entry; this
			 * looks deliberate (scope/type/lower appear to
			 * identify the binding) but worth confirming.
			 */
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		/* seq == NULL: withdraw every publication on the socket */
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}
2197
2198 static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
2199 int len, int full_id)
2200 {
2201 struct publication *publ;
2202 int ret;
2203
2204 if (full_id)
2205 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
2206 tipc_zone(tipc_own_addr),
2207 tipc_cluster(tipc_own_addr),
2208 tipc_node(tipc_own_addr), tsk->portid);
2209 else
2210 ret = tipc_snprintf(buf, len, "%-10u:", tsk->portid);
2211
2212 if (tsk->connected) {
2213 u32 dport = tsk_peer_port(tsk);
2214 u32 destnode = tsk_peer_node(tsk);
2215
2216 ret += tipc_snprintf(buf + ret, len - ret,
2217 " connected to <%u.%u.%u:%u>",
2218 tipc_zone(destnode),
2219 tipc_cluster(destnode),
2220 tipc_node(destnode), dport);
2221 if (tsk->conn_type != 0)
2222 ret += tipc_snprintf(buf + ret, len - ret,
2223 " via {%u,%u}", tsk->conn_type,
2224 tsk->conn_instance);
2225 } else if (tsk->published) {
2226 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
2227 list_for_each_entry(publ, &tsk->publications, pport_list) {
2228 if (publ->lower == publ->upper)
2229 ret += tipc_snprintf(buf + ret, len - ret,
2230 " {%u,%u}", publ->type,
2231 publ->lower);
2232 else
2233 ret += tipc_snprintf(buf + ret, len - ret,
2234 " {%u,%u,%u}", publ->type,
2235 publ->lower, publ->upper);
2236 }
2237 }
2238 ret += tipc_snprintf(buf + ret, len - ret, "\n");
2239 return ret;
2240 }
2241
/* tipc_sk_socks_show - build a legacy (compat) listing of every socket in
 * the given network namespace
 * @net: namespace whose socket hash table is dumped
 *
 * Returns a newly allocated reply buffer holding one text line per socket
 * (formatted by tipc_sk_show()), or NULL on allocation failure.  The
 * caller owns the returned buffer.
 */
struct sk_buff *tipc_sk_socks_show(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	char *pb;
	int pb_len;
	struct tipc_sock *tsk;
	int str_len = 0;
	int i;

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;
	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;

	/* Walk all hash buckets under RCU; each socket is stabilized with
	 * its own spinlock while its line is formatted.
	 */
	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			str_len += tipc_sk_show(tsk, pb + str_len,
						pb_len - str_len, 0);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
	rcu_read_unlock();

	str_len += 1; /* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
2280
2281 /* tipc_sk_reinit: set non-zero address in all existing sockets
2282 * when we go from standalone to network mode.
2283 */
2284 void tipc_sk_reinit(struct net *net)
2285 {
2286 struct tipc_net *tn = net_generic(net, tipc_net_id);
2287 const struct bucket_table *tbl;
2288 struct rhash_head *pos;
2289 struct tipc_sock *tsk;
2290 struct tipc_msg *msg;
2291 int i;
2292
2293 rcu_read_lock();
2294 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2295 for (i = 0; i < tbl->size; i++) {
2296 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2297 spin_lock_bh(&tsk->sk.sk_lock.slock);
2298 msg = &tsk->phdr;
2299 msg_set_prevnode(msg, tipc_own_addr);
2300 msg_set_orignode(msg, tipc_own_addr);
2301 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2302 }
2303 }
2304 rcu_read_unlock();
2305 }
2306
2307 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2308 {
2309 struct tipc_net *tn = net_generic(net, tipc_net_id);
2310 struct tipc_sock *tsk;
2311
2312 rcu_read_lock();
2313 tsk = rhashtable_lookup(&tn->sk_rht, &portid);
2314 if (tsk)
2315 sock_hold(&tsk->sk);
2316 rcu_read_unlock();
2317
2318 return tsk;
2319 }
2320
2321 static int tipc_sk_insert(struct tipc_sock *tsk)
2322 {
2323 struct sock *sk = &tsk->sk;
2324 struct net *net = sock_net(sk);
2325 struct tipc_net *tn = net_generic(net, tipc_net_id);
2326 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2327 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2328
2329 while (remaining--) {
2330 portid++;
2331 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2332 portid = TIPC_MIN_PORT;
2333 tsk->portid = portid;
2334 sock_hold(&tsk->sk);
2335 if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
2336 return 0;
2337 sock_put(&tsk->sk);
2338 }
2339
2340 return -1;
2341 }
2342
/* tipc_sk_remove - unhash the socket from the per-namespace table and
 * drop the reference the table held on it (taken in tipc_sk_insert()).
 */
static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
		/* The table's reference must never be the last one */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}
2353
/* tipc_sk_rht_init - create the per-namespace socket hash table, keyed on
 * the 32-bit port id.  Returns 0 on success, negative errno otherwise.
 */
int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_params rht_params = {
		.nelem_hint = 192,
		.head_offset = offsetof(struct tipc_sock, node),
		.key_offset = offsetof(struct tipc_sock, portid),
		.key_len = sizeof(u32), /* portid */
		.hashfn = jhash,
		.max_shift = 20, /* 1M */
		.min_shift = 8,  /* 256 */
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	return rhashtable_init(&tn->sk_rht, &rht_params);
}
2371
/* tipc_sk_rht_destroy - tear down the per-namespace socket hash table */
void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}
2381
2382 /**
2383 * tipc_setsockopt - set socket option
2384 * @sock: socket structure
2385 * @lvl: option level
2386 * @opt: option identifier
2387 * @ov: pointer to new option value
2388 * @ol: length of option value
2389 *
2390 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2391 * (to ease compatibility).
2392 *
2393 * Returns 0 on success, errno otherwise
2394 */
2395 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2396 char __user *ov, unsigned int ol)
2397 {
2398 struct sock *sk = sock->sk;
2399 struct tipc_sock *tsk = tipc_sk(sk);
2400 u32 value;
2401 int res;
2402
2403 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2404 return 0;
2405 if (lvl != SOL_TIPC)
2406 return -ENOPROTOOPT;
2407 if (ol < sizeof(value))
2408 return -EINVAL;
2409 res = get_user(value, (u32 __user *)ov);
2410 if (res)
2411 return res;
2412
2413 lock_sock(sk);
2414
2415 switch (opt) {
2416 case TIPC_IMPORTANCE:
2417 res = tsk_set_importance(tsk, value);
2418 break;
2419 case TIPC_SRC_DROPPABLE:
2420 if (sock->type != SOCK_STREAM)
2421 tsk_set_unreliable(tsk, value);
2422 else
2423 res = -ENOPROTOOPT;
2424 break;
2425 case TIPC_DEST_DROPPABLE:
2426 tsk_set_unreturnable(tsk, value);
2427 break;
2428 case TIPC_CONN_TIMEOUT:
2429 tipc_sk(sk)->conn_timeout = value;
2430 /* no need to set "res", since already 0 at this point */
2431 break;
2432 default:
2433 res = -EINVAL;
2434 }
2435
2436 release_sock(sk);
2437
2438 return res;
2439 }
2440
2441 /**
2442 * tipc_getsockopt - get socket option
2443 * @sock: socket structure
2444 * @lvl: option level
2445 * @opt: option identifier
2446 * @ov: receptacle for option value
2447 * @ol: receptacle for length of option value
2448 *
2449 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2450 * (to ease compatibility).
2451 *
2452 * Returns 0 on success, errno otherwise
2453 */
2454 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2455 char __user *ov, int __user *ol)
2456 {
2457 struct sock *sk = sock->sk;
2458 struct tipc_sock *tsk = tipc_sk(sk);
2459 int len;
2460 u32 value;
2461 int res;
2462
2463 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2464 return put_user(0, ol);
2465 if (lvl != SOL_TIPC)
2466 return -ENOPROTOOPT;
2467 res = get_user(len, ol);
2468 if (res)
2469 return res;
2470
2471 lock_sock(sk);
2472
2473 switch (opt) {
2474 case TIPC_IMPORTANCE:
2475 value = tsk_importance(tsk);
2476 break;
2477 case TIPC_SRC_DROPPABLE:
2478 value = tsk_unreliable(tsk);
2479 break;
2480 case TIPC_DEST_DROPPABLE:
2481 value = tsk_unreturnable(tsk);
2482 break;
2483 case TIPC_CONN_TIMEOUT:
2484 value = tsk->conn_timeout;
2485 /* no need to set "res", since already 0 at this point */
2486 break;
2487 case TIPC_NODE_RECVQ_DEPTH:
2488 value = 0; /* was tipc_queue_size, now obsolete */
2489 break;
2490 case TIPC_SOCK_RECVQ_DEPTH:
2491 value = skb_queue_len(&sk->sk_receive_queue);
2492 break;
2493 default:
2494 res = -EINVAL;
2495 }
2496
2497 release_sock(sk);
2498
2499 if (res)
2500 return res; /* "get" failed */
2501
2502 if (len < sizeof(value))
2503 return -EINVAL;
2504
2505 if (copy_to_user(ov, &value, sizeof(value)))
2506 return -EFAULT;
2507
2508 return put_user(sizeof(value), ol);
2509 }
2510
2511 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2512 {
2513 struct sock *sk = sock->sk;
2514 struct tipc_sioc_ln_req lnr;
2515 void __user *argp = (void __user *)arg;
2516
2517 switch (cmd) {
2518 case SIOCGETLINKNAME:
2519 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2520 return -EFAULT;
2521 if (!tipc_node_get_linkname(sock_net(sk),
2522 lnr.bearer_id & 0xffff, lnr.peer,
2523 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2524 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2525 return -EFAULT;
2526 return 0;
2527 }
2528 return -EADDRNOTAVAIL;
2529 default:
2530 return -ENOIOCTLCMD;
2531 }
2532 }
2533
/* Protocol switches for the various types of TIPC sockets */

/* SOCK_RDM / SOCK_DGRAM: connectionless datagrams, no listen/accept */
static const struct proto_ops msg_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = tipc_release,
	.bind = tipc_bind,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = tipc_getname,
	.poll = tipc_poll,
	.ioctl = tipc_ioctl,
	.listen = sock_no_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_sendmsg,
	.recvmsg = tipc_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};

/* SOCK_SEQPACKET: connection-oriented, message-boundary preserving */
static const struct proto_ops packet_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = tipc_release,
	.bind = tipc_bind,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = tipc_accept,
	.getname = tipc_getname,
	.poll = tipc_poll,
	.ioctl = tipc_ioctl,
	.listen = tipc_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_send_packet,
	.recvmsg = tipc_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};

/* SOCK_STREAM: connection-oriented byte stream */
static const struct proto_ops stream_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = tipc_release,
	.bind = tipc_bind,
	.connect = tipc_connect,
	.socketpair = sock_no_socketpair,
	.accept = tipc_accept,
	.getname = tipc_getname,
	.poll = tipc_poll,
	.ioctl = tipc_ioctl,
	.listen = tipc_listen,
	.shutdown = tipc_shutdown,
	.setsockopt = tipc_setsockopt,
	.getsockopt = tipc_getsockopt,
	.sendmsg = tipc_send_stream,
	.recvmsg = tipc_recv_stream,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage
};

/* AF_TIPC address-family entry point */
static const struct net_proto_family tipc_family_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.create = tipc_sk_create
};

/* Protocol descriptor for user-space sockets */
static struct proto tipc_proto = {
	.name = "TIPC",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tipc_sock),
	.sysctl_rmem = sysctl_tipc_rmem
};

/* Protocol descriptor for kernel-internal sockets (no module owner) */
static struct proto tipc_proto_kern = {
	.name = "TIPC",
	.obj_size = sizeof(struct tipc_sock),
	.sysctl_rmem = sysctl_tipc_rmem
};
2617
2618 /**
2619 * tipc_socket_init - initialize TIPC socket interface
2620 *
2621 * Returns 0 on success, errno otherwise
2622 */
2623 int tipc_socket_init(void)
2624 {
2625 int res;
2626
2627 res = proto_register(&tipc_proto, 1);
2628 if (res) {
2629 pr_err("Failed to register TIPC protocol type\n");
2630 goto out;
2631 }
2632
2633 res = sock_register(&tipc_family_ops);
2634 if (res) {
2635 pr_err("Failed to register TIPC socket type\n");
2636 proto_unregister(&tipc_proto);
2637 goto out;
2638 }
2639 out:
2640 return res;
2641 }
2642
/**
 * tipc_socket_stop - stop TIPC socket interface
 *
 * Reverses tipc_socket_init(): unregisters the AF_TIPC address family,
 * then the protocol type.
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}
2651
2652 /* Caller should hold socket lock for the passed tipc socket. */
2653 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2654 {
2655 u32 peer_node;
2656 u32 peer_port;
2657 struct nlattr *nest;
2658
2659 peer_node = tsk_peer_node(tsk);
2660 peer_port = tsk_peer_port(tsk);
2661
2662 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2663
2664 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2665 goto msg_full;
2666 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2667 goto msg_full;
2668
2669 if (tsk->conn_type != 0) {
2670 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2671 goto msg_full;
2672 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2673 goto msg_full;
2674 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2675 goto msg_full;
2676 }
2677 nla_nest_end(skb, nest);
2678
2679 return 0;
2680
2681 msg_full:
2682 nla_nest_cancel(skb, nest);
2683
2684 return -EMSGSIZE;
2685 }
2686
/* __tipc_nl_add_sk - append one complete netlink record describing @tsk
 * to the dump skb: genl header, TIPC_NLA_SOCK nest with port id and node
 * address, plus connection details or a has-publications flag.
 *
 * Caller should hold socket lock for the passed tipc socket.
 *
 * Returns 0 on success, -EMSGSIZE if the skb is full (everything written
 * for this record is rolled back so the dump can resume later).
 */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
		goto attr_msg_cancel;

	if (tsk->connected) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		/* Bound socket: publications are dumped separately via
		 * tipc_nl_publ_dump(); here we only flag their existence.
		 */
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
2728
/* tipc_nl_sk_dump - netlink dump callback listing all sockets in the
 * namespace.  The port id of the last fully written socket is kept in
 * cb->args[0] so a subsequent invocation resumes after it.
 *
 * Returns the number of bytes written to @skb.
 */
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	u32 prev_portid = cb->args[0];
	u32 portid = prev_portid;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			portid = tsk->portid;
			err = __tipc_nl_add_sk(skb, cb, tsk);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
			if (err)
				break;	/* skb full; resume from prev_portid */

			prev_portid = portid;
		}
	}
	rcu_read_unlock();

	cb->args[0] = prev_portid;

	return skb->len;
}
2761
/* __tipc_nl_add_sk_publ - append one netlink record describing a single
 * publication (key, type, lower, upper) to the dump skb.
 *
 * Caller should hold socket lock for the passed tipc socket.
 *
 * Returns 0 on success, -EMSGSIZE if the skb is full (the partial record
 * is rolled back).
 */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
2800
2801 /* Caller should hold socket lock for the passed tipc socket. */
2802 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2803 struct netlink_callback *cb,
2804 struct tipc_sock *tsk, u32 *last_publ)
2805 {
2806 int err;
2807 struct publication *p;
2808
2809 if (*last_publ) {
2810 list_for_each_entry(p, &tsk->publications, pport_list) {
2811 if (p->key == *last_publ)
2812 break;
2813 }
2814 if (p->key != *last_publ) {
2815 /* We never set seq or call nl_dump_check_consistent()
2816 * this means that setting prev_seq here will cause the
2817 * consistence check to fail in the netlink callback
2818 * handler. Resulting in the last NLMSG_DONE message
2819 * having the NLM_F_DUMP_INTR flag set.
2820 */
2821 cb->prev_seq = 1;
2822 *last_publ = 0;
2823 return -EPIPE;
2824 }
2825 } else {
2826 p = list_first_entry(&tsk->publications, struct publication,
2827 pport_list);
2828 }
2829
2830 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2831 err = __tipc_nl_add_sk_publ(skb, cb, p);
2832 if (err) {
2833 *last_publ = p->key;
2834 return err;
2835 }
2836 }
2837 *last_publ = 0;
2838
2839 return 0;
2840 }
2841
/* tipc_nl_publ_dump - netlink dump callback listing the publications of
 * one socket, identified by the TIPC_NLA_SOCK_REF attribute of the
 * request.  Dump state across invocations lives in cb->args:
 *   args[0] = target socket port id, args[1] = resume key,
 *   args[2] = done flag.
 *
 * Returns the number of bytes written to @skb, or a negative errno.
 */
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		/* First invocation: extract the target socket from the
		 * original request message.
		 */
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	/* tipc_sk_lookup() takes a reference; released below */
	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}
This page took 0.094096 seconds and 5 git commands to generate.