tipc: make tipc node table aware of net namespace
/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "config.h"
#include "socket.h"

#define SS_LISTENING		-1	/* socket is listening */
#define SS_READY		-2	/* socket is connectionless */

#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTERVAL	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_CONN_OK		0
#define TIPC_CONN_PROBING	1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @sock_list: adjacent entries in TIPC's global list of sockets
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state: state of connection probing (TIPC_CONN_OK/TIPC_CONN_PROBING)
 * @probing_intv: interval between connection probes, in jiffies
 * @timer: timer governing connection probing and supervision
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	int connected;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head sock_list;
	struct list_head publications;
	u32 pub_count;
	u32 probing_state;
	unsigned long probing_intv;
	struct timer_list timer;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool link_cong;
	uint sent_unacked;
	uint rcv_unacked;
	struct rhash_head node;
	struct rcu_head rcu;
};

static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
static void tipc_sk_timeout(unsigned long data);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static struct tipc_sock *tipc_sk_lookup(u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;

static struct proto tipc_proto;
static struct proto tipc_proto_kern;

static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
	[TIPC_NLA_SOCK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_SOCK_ADDR]		= { .type = NLA_U32 },
	[TIPC_NLA_SOCK_REF]		= { .type = NLA_U32 },
	[TIPC_NLA_SOCK_CON]		= { .type = NLA_NESTED },
	[TIPC_NLA_SOCK_HAS_PUBL]	= { .type = NLA_FLAG }
};
/*
 * Revised TIPC socket locking policy:
 *
 * Most socket operations take the standard socket lock when they start
 * and hold it until they finish (or until they need to sleep). Acquiring
 * this lock grants the owner exclusive access to the fields of the socket
 * data structures, with the exception of the backlog queue. A few socket
 * operations can be done without taking the socket lock because they only
 * read socket information that never changes during the life of the socket.
 *
 * Socket operations may acquire the lock for the associated TIPC port if they
 * need to perform an operation on the port. If any routine needs to acquire
 * both the socket lock and the port lock it must take the socket lock first
 * to avoid the risk of deadlock.
 *
 * The dispatcher handling incoming messages cannot grab the socket lock in
 * the standard fashion, since it runs at the BH level and cannot block.
 * Instead, it checks to see if the socket lock is currently owned by someone,
 * and either handles the message itself or adds it to the socket's backlog
 * queue; in the latter case the queued message is processed once the process
 * owning the socket lock releases it.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
 * the problem of a blocked socket operation preventing any other operations
 * from occurring. However, applications must be careful if they have
 * multiple threads trying to send (or receive) on the same socket, as these
 * operations might interfere with each other. For example, doing a connect
 * and a receive at the same time might allow the receive to consume the
 * ACK message meant for the connect. While additional work could be done
 * to try and overcome this, it doesn't seem to be worthwhile at the present.
 *
 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
 * that another operation that must be performed in a non-blocking manner is
 * not delayed for very long because the lock has already been taken.
 *
 * NOTE: This code assumes that certain fields of a port/socket pair are
 * constant over its lifetime; such fields can be examined without taking
 * the socket lock and/or port lock, and do not need to be re-read even
 * after resuming processing after waiting. These fields include:
 *   - socket type
 *   - pointer to socket sk structure (aka tipc_sock structure)
 *   - pointer to port structure
 *   - port reference
 */
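
/*
 * In outline, the dispatcher pattern described above works as follows
 * (an illustrative sketch only; the authoritative code is tipc_sk_rcv()
 * further down in this file):
 *
 *	spin_lock_bh(&sk->sk_lock.slock);
 *	if (!sock_owned_by_user(sk))
 *		rc = filter_rcv(sk, skb);             <- handle at BH level
 *	else
 *		rc = sk_add_backlog(sk, skb, limit);  <- defer to lock owner
 *	spin_unlock_bh(&sk->sk_lock.slock);
 */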

/* Protects tipc socket hash table mutations */
static struct rhashtable tipc_sk_rht;

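/* Shorthand accessors for fields of the socket's pre-formatted message
 * header (tsk->phdr)
 */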
static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

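/* tsk_conn_cong - check if the connection-level flow control window is
 * full, i.e. the sender must wait for a CONN_ACK from the peer
 */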
static int tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
}

/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;
	u32 dnode;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
		if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
			tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
	}
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node;
	u32 peer_node;

	if (unlikely(!tsk->connected))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && (peer_node == tipc_own_addr))
		return true;

	if (!peer_node && (orig_node == tipc_own_addr))
		return true;

	return false;
}

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	if (!kern)
		sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	else
		sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);

	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	msg = &tsk->phdr;
	tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
		      NAMED_H_SIZE, 0);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;
	sock_init_data(sock, sk);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}
	msg_set_origport(msg, tsk->portid);
	setup_timer(&tsk->timer, tipc_sk_timeout, (unsigned long)tsk);
	sk->sk_backlog_rcv = tipc_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->sent_unacked = 0;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	if (sock->state == SS_READY) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	return 0;
}

/**
 * tipc_sock_create_local - create TIPC socket from inside TIPC module
 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
 *
 * We cannot use sock_create_kern here because it bumps the module user count.
 * Since socket owner and creator is the same module we must make sure
 * that module count remains zero for module local sockets, otherwise
 * we cannot do rmmod.
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_sock_create_local(int type, struct socket **res)
{
	int rc;

	rc = sock_create_lite(AF_TIPC, type, 0, res);
	if (rc < 0) {
		pr_err("Failed to create kernel socket\n");
		return rc;
	}
	tipc_sk_create(&init_net, *res, 0, 1);

	return 0;
}

/**
 * tipc_sock_release_local - release socket created by tipc_sock_create_local
 * @sock: the socket to be released.
 *
 * Module reference count is not incremented when such sockets are created,
 * so we must keep it from being decremented when they are released.
 */
void tipc_sock_release_local(struct socket *sock)
{
	tipc_release(sock);
	sock->ops = NULL;
	sock_release(sock);
}

/**
 * tipc_sock_accept_local - accept a connection on a socket created
 * with tipc_sock_create_local. Use this function to avoid that
 * module reference count is inadvertently incremented.
 *
 * @sock: the accepting socket
 * @newsock: reference to the new socket to be created
 * @flags: socket flags
 */

int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
			   int flags)
{
	struct sock *sk = sock->sk;
	int ret;

	ret = sock_create_lite(sk->sk_family, sk->sk_type,
			       sk->sk_protocol, newsock);
	if (ret < 0)
		return ret;

	ret = tipc_accept(sock, *newsock, flags);
	if (ret < 0) {
		sock_release(*newsock);
		return ret;
	}
	(*newsock)->ops = sock->ops;
	return ret;
}

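/* tipc_sk_callback - release the final socket reference once the RCU
 * grace period has expired
 */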
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk;
	struct sk_buff *skb;
	u32 dnode, probing_state;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	dnode = tsk_peer_node(tsk);
	while (sock->state != SS_DISCONNECTING) {
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL)
			break;
		if (TIPC_SKB_CB(skb)->handle != NULL)
			kfree_skb(skb);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tsk->connected = 0;
				tipc_node_remove_conn(net, dnode, tsk->portid);
			}
			if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
				tipc_link_xmit_skb(net, skb, dnode, 0);
		}
	}

	tipc_sk_withdraw(tsk, 0, NULL);
	probing_state = tsk->probing_state;
	if (del_timer_sync(&tsk->timer) && probing_state != TIPC_CONN_PROBING)
		sock_put(sk);
	tipc_sk_remove(tsk);
	if (tsk->connected) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, dnode, tipc_own_addr,
				      tsk_peer_port(tsk),
				      tsk->portid, TIPC_ERR_NO_PORT);
		if (skb)
			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
	}

	/* Discard any remaining (connection-based) messages in receive queue */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope > 0) ?
		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
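
/*
 * For reference, a minimal userspace sketch of the bind() usage described
 * above (illustrative only; the type number 1000 and the instance range
 * are arbitrary example values):
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *	addr.addr.nameseq.type = 1000;
 *	addr.addr.nameseq.lower = 0;
 *	addr.addr.nameseq.upper = 99;
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Passing a negative scope (e.g. -TIPC_CLUSTER_SCOPE) withdraws the binding.
 */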

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 * accesses socket information that is unchanging (or which changes in
 * a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((sock->state != SS_CONNECTED) &&
		    ((peer != 2) || (sock->state != SS_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return 0;
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table, used by the VFS poll machinery to register wait queues
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * TIPC sets the returned events as follows:
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
 *			POLLOUT if port is not congested
 *
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *			no write flags
 *
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 *
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *			no write flags
 *
 * listening		POLLIN if SYN in rx queue
 *			no write flags
 *
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static unsigned int tipc_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	switch ((int)sock->state) {
	case SS_UNCONNECTED:
		if (!tsk->link_cong)
			mask |= POLLOUT;
		break;
	case SS_READY:
	case SS_CONNECTED:
		if (!tsk->link_cong && !tsk_conn_cong(tsk))
			mask |= POLLOUT;
		/* fall thru' */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dsz, long timeo)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
	struct sk_buff_head head;
	uint mtu;
	int rc;

	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

new_mtu:
	mtu = tipc_bclink_get_mtu();
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
	if (unlikely(rc < 0))
		return rc;

	do {
		rc = tipc_bclink_xmit(net, &head);
		if (likely(rc >= 0)) {
			rc = dsz;
			break;
		}
		if (rc == -EMSGSIZE)
			goto new_mtu;
		if (rc != -ELINKCONG)
			break;
		tipc_sk(sk)->link_cong = 1;
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
	return rc;
}

/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_port_list dports = {0, NULL, };
	struct tipc_port_list *item;
	struct sk_buff *b;
	uint i, last, dst = 0;
	u32 scope = TIPC_CLUSTER_SCOPE;

	if (in_own_node(msg_orignode(msg)))
		scope = TIPC_NODE_SCOPE;

	/* Create destination port list: */
	tipc_nametbl_mc_translate(msg_nametype(msg),
				  msg_namelower(msg),
				  msg_nameupper(msg),
				  scope,
				  &dports);
	last = dports.count;
	if (!last) {
		kfree_skb(buf);
		return;
	}

	for (item = &dports; item; item = item->next) {
		for (i = 0; i < PLSIZE && ++dst <= last; i++) {
			b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
			if (!b) {
				pr_warn("Failed to clone mcast rcv buffer\n");
				continue;
			}
			msg_set_destport(msg, item->ports[i]);
			tipc_sk_rcv(net, b);
		}
	}
	tipc_port_list_free(&dports);
}

/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @dnode: node to send response message to, if any
 * @buf: buffer containing protocol message
 * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
 * (CONN_PROBE_REPLY) message should be forwarded.
 */
static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
			     struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	int conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, msg))
		goto exit;

	tsk->probing_state = TIPC_CONN_OK;

	if (msg_type(msg) == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->sent_unacked -= msg_msgcnt(msg);
		if (conn_cong)
			tsk->sk.sk_write_space(&tsk->sk);
	} else if (msg_type(msg) == CONN_PROBE) {
		if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
			return TIPC_OK;
		msg_set_type(msg, CONN_PROBE_REPLY);
		return TIPC_FWD_MSG;
	}
	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
	kfree_skb(buf);
	return TIPC_OK;
}

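/* tipc_wait_for_sndmsg - sleep until link congestion has abated, or until
 * an error, timeout, or signal makes further waiting pointless; returns 0
 * when the caller may retry the send
 */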
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_sendmsg - send message in connectionless manner
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	u32 dnode, dport;
	struct sk_buff_head head;
	struct sk_buff *skb;
	struct tipc_name_seq *seq = &dest->addr.nameseq;
	u32 mtu;
	long timeo;
	int rc;

	if (unlikely(!dest))
		return -EDESTADDRREQ;

	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;

	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	if (unlikely(sock->state != SS_READY)) {
		if (sock->state == SS_LISTENING) {
			rc = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			rc = -EISCONN;
			goto exit;
		}
		if (tsk->published) {
			rc = -EOPNOTSUPP;
			goto exit;
		}
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	if (dest->addrtype == TIPC_ADDR_MCAST) {
		rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
		goto exit;
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		dnode = domain;
		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
		dport = tipc_nametbl_translate(type, inst, &dnode);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
		if (unlikely(!dport && !dnode)) {
			rc = -EHOSTUNREACH;
			goto exit;
		}
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

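	/* Build the message for the current link MTU and send it; restart
	 * from here if the MTU turns out to have changed (-EMSGSIZE)
	 */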
new_mtu:
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
	if (rc < 0)
		goto exit;

	do {
		skb = skb_peek(&head);
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
		rc = tipc_link_xmit(net, &head, dnode, tsk->portid);
		if (likely(rc >= 0)) {
			if (sock->state != SS_READY)
				sock->state = SS_CONNECTING;
			rc = dsz;
			break;
		}
		if (rc == -EMSGSIZE)
			goto new_mtu;
		if (rc != -ELINKCONG)
			break;
		tsk->link_cong = 1;
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
exit:
	if (iocb)
		release_sock(sk);

	return rc;
}

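/* tipc_wait_for_sndpkt - sleep until the connection is no longer congested,
 * or until the connection goes away, a signal arrives, or the timeout expires
 */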
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		else if (sock->state != SS_CONNECTED)
			return -ENOTCONN;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p,
				     (!tsk->link_cong &&
				      !tsk_conn_cong(tsk)) ||
				     !tsk->connected);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_send_stream - send stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head head;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	u32 portid = tsk->portid;
	int rc = -EINVAL;
	long timeo;
	u32 dnode;
	uint mtu, send, sent = 0;

	/* Handle implied connection establishment */
	if (unlikely(dest)) {
		rc = tipc_sendmsg(iocb, sock, m, dsz);
		if (dsz && (dsz == rc))
			tsk->sent_unacked = 1;
		return rc;
	}
	if (dsz > (uint)INT_MAX)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
			rc = -EPIPE;
		else
			rc = -ENOTCONN;
		goto exit;
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	dnode = tsk_peer_node(tsk);

next:
	mtu = tsk->max_pkt;
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
	if (unlikely(rc < 0))
		goto exit;
	do {
		if (likely(!tsk_conn_cong(tsk))) {
			rc = tipc_link_xmit(net, &head, dnode, portid);
			if (likely(!rc)) {
				tsk->sent_unacked++;
				sent += send;
				if (sent == dsz)
					break;
				goto next;
			}
			if (rc == -EMSGSIZE) {
				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
								 portid);
				goto next;
			}
			if (rc != -ELINKCONG)
				break;
			tsk->link_cong = 1;
		}
		rc = tipc_wait_for_sndpkt(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
exit:
	if (iocb)
		release_sock(sk);
	return sent ? sent : rc;
}

/**
 * tipc_send_packet - send a connection-oriented message
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_send_stream(iocb, sock, m, dsz);
}

/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	tsk->probing_intv = CONN_PROBING_INTERVAL;
	tsk->probing_state = TIPC_CONN_OK;
	tsk->connected = 1;
	if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv))
		sock_hold(&tsk->sk);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
}

/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);

	if (addr) {
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
		memset(&addr->addr, 0, sizeof(addr->addr));
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
		addr->addr.name.domain = 0;	/* could leave uninitialized */
		addr->scope = 0;		/* could leave uninitialized */
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}

/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}

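/* tipc_sk_send_ack - send a CONN_ACK to the connected peer, advertising
 * @ack consumed messages so that the peer can reopen its send window
 */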
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
{
	struct net *net = sock_net(&tsk->sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tsk->connected)
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
			      tipc_own_addr, peer_port, tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_msgcnt(msg, ack);
	tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}

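/* tipc_wait_for_rcvmsg - sleep until data arrives in the receive queue, the
 * connection goes down, a signal is caught, or the timeout expires
 */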
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sock->state == SS_DISCONNECTING) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}

/**
 * tipc_recvmsg - receive packet-oriented message
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if ((sock->state != SS_READY) &&
		    (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
			tsk->rcv_unacked = 0;
		}
		tsk_advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}

/**
 * tipc_recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
					    m, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
					(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
			tsk->rcv_unacked = 0;
		}
		tsk_advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}

/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
						POLLRDNORM | POLLRDBAND);
	rcu_read_unlock();
}

/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @buf: pointer to the received message buffer
 *
 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
 */
static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(*buf);
	int retval = -TIPC_ERR_NO_PORT;

	if (msg_mcast(msg))
		return retval;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		/* Accept only connection-based messages sent by peer */
		if (tsk_peer_msg(tsk, msg)) {
			if (unlikely(msg_errcode(msg))) {
				sock->state = SS_DISCONNECTING;
				tsk->connected = 0;
				/* let timer expire on its own */
				tipc_node_remove_conn(net, tsk_peer_node(tsk),
						      tsk->portid);
			}
			retval = TIPC_OK;
		}
		break;
	case SS_CONNECTING:
		/* Accept only ACK or NACK message */

		if (unlikely(!msg_connected(msg)))
			break;

		if (unlikely(msg_errcode(msg))) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = ECONNREFUSED;
			retval = TIPC_OK;
			break;
		}

		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = EINVAL;
			retval = TIPC_OK;
			break;
		}

		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
		msg_set_importance(&tsk->phdr, msg_importance(msg));
		sock->state = SS_CONNECTED;

		/* If an incoming message is an 'ACK-', it should be
		 * discarded here because it doesn't contain useful
		 * data. In addition, we should try to wake up
		 * connect() routine if sleeping.
		 */
		if (msg_data_sz(msg) == 0) {
			kfree_skb(*buf);
			*buf = NULL;
			if (waitqueue_active(sk_sleep(sk)))
				wake_up_interruptible(sk_sleep(sk));
		}
		retval = TIPC_OK;
		break;
	case SS_LISTENING:
	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(msg) && !(msg_errcode(msg)))
			retval = TIPC_OK;
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
	return retval;
}

/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @buf: message
 *
 * For all connection oriented messages, irrespective of importance,
 * the default overload value (i.e. 67MB) is set as limit.
 *
 * For all connectionless messages, by default new queue limits are
 * as below:
 *
 * TIPC_LOW_IMPORTANCE       (4 MB)
 * TIPC_MEDIUM_IMPORTANCE    (8 MB)
 * TIPC_HIGH_IMPORTANCE      (16 MB)
 * TIPC_CRITICAL_IMPORTANCE  (32 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	if (msg_connected(msg))
		return sysctl_tipc_rmem[2];

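	/* Each importance level doubles the connectionless limit of the
	 * level below it
	 */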
	return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
		msg_importance(msg);
}

/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
 * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
 */
static int filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit = rcvbuf_limit(sk, buf);
	u32 onode;
	int rc = TIPC_OK;

	if (unlikely(msg_user(msg) == CONN_MANAGER))
		return tipc_sk_proto_rcv(tsk, &onode, buf);

	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
		kfree_skb(buf);
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
		return TIPC_OK;
	}

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return -TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		if (msg_connected(msg))
			return -TIPC_ERR_NO_PORT;
	} else {
		rc = filter_connect(tsk, &buf);
		if (rc != TIPC_OK || buf == NULL)
			return rc;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
		return -TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(buf)->handle = NULL;
	__skb_queue_tail(&sk->sk_receive_queue, buf);
	skb_set_owner_r(buf, sk);

	sk->sk_data_ready(sk);
	return TIPC_OK;
}

/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock, but not port lock.
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc;
	u32 onode;
	struct tipc_sock *tsk = tipc_sk(sk);
	uint truesize = skb->truesize;

	rc = filter_rcv(sk, skb);

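	/* A buffer consumed from the backlog now also sits in the receive
	 * queue, so record its truesize to avoid double-counting it against
	 * the limit used in tipc_sk_rcv() (see @dupl_rcvcnt)
	 */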
	if (likely(!rc)) {
		if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
			atomic_add(truesize, &tsk->dupl_rcvcnt);
		return 0;
	}

	if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
		return 0;

	tipc_link_xmit_skb(sock_net(sk), skb, onode, 0);

	return 0;
}

/**
 * tipc_sk_rcv - handle incoming message
 * @net: the applicable net namespace
 * @skb: buffer containing arriving message
 * Consumes buffer
 * Returns 0 if success, or errno: -EHOSTUNREACH
 */
int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
{
	struct tipc_sock *tsk;
	struct sock *sk;
	u32 dport = msg_destport(buf_msg(skb));
	int rc = TIPC_OK;
	uint limit;
	u32 dnode;

	/* Validate destination and message */
	tsk = tipc_sk_lookup(dport);
	if (unlikely(!tsk)) {
		rc = tipc_msg_eval(skb, &dnode);
		goto exit;
	}
	sk = &tsk->sk;

	/* Queue message */
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sock_owned_by_user(sk)) {
		rc = filter_rcv(sk, skb);
	} else {
		if (sk->sk_backlog.len == 0)
			atomic_set(&tsk->dupl_rcvcnt, 0);
		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
		if (sk_add_backlog(sk, skb, limit))
			rc = -TIPC_ERR_OVERLOAD;
	}
	spin_unlock_bh(&sk->sk_lock.slock);
	sock_put(sk);
	if (likely(!rc))
		return 0;
exit:
	if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
		return -EHOSTUNREACH;

	tipc_link_xmit_skb(net, skb, dnode, 0);
	return (rc < 0) ? -EHOSTUNREACH : 0;
}

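/* tipc_wait_for_connect - sleep until the connect handshake completes or
 * fails, a signal is caught, or the timeout expires
 */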
1800 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
1801 {
1802 struct sock *sk = sock->sk;
1803 DEFINE_WAIT(wait);
1804 int done;
1805
1806 do {
1807 int err = sock_error(sk);
1808 if (err)
1809 return err;
1810 if (!*timeo_p)
1811 return -ETIMEDOUT;
1812 if (signal_pending(current))
1813 return sock_intr_errno(*timeo_p);
1814
1815 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1816 done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
1817 finish_wait(sk_sleep(sk), &wait);
1818 } while (!done);
1819 return 0;
1820 }
1821
1822 /**
1823 * tipc_connect - establish a connection to another TIPC port
1824 * @sock: socket structure
1825 * @dest: socket address for destination port
1826 * @destlen: size of socket address data structure
1827 * @flags: file-related flags associated with socket
1828 *
1829 * Returns 0 on success, errno otherwise
1830 */
1831 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
1832 int destlen, int flags)
1833 {
1834 struct sock *sk = sock->sk;
1835 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1836 struct msghdr m = {NULL,};
1837 long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
1838 socket_state previous;
1839 int res;
1840
1841 lock_sock(sk);
1842
1843 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1844 if (sock->state == SS_READY) {
1845 res = -EOPNOTSUPP;
1846 goto exit;
1847 }
1848
1849 /*
1850 * Reject connection attempt using multicast address
1851 *
1852 * Note: send_msg() validates the rest of the address fields,
1853 * so there's no need to do it here
1854 */
1855 if (dst->addrtype == TIPC_ADDR_MCAST) {
1856 res = -EINVAL;
1857 goto exit;
1858 }
1859
1860 previous = sock->state;
1861 switch (sock->state) {
1862 case SS_UNCONNECTED:
1863 /* Send a 'SYN-' to destination */
1864 m.msg_name = dest;
1865 m.msg_namelen = destlen;
1866
1867 /* If connect is in non-blocking case, set MSG_DONTWAIT to
1868 * indicate send_msg() is never blocked.
1869 */
1870 if (!timeout)
1871 m.msg_flags = MSG_DONTWAIT;
1872
1873 res = tipc_sendmsg(NULL, sock, &m, 0);
1874 if ((res < 0) && (res != -EWOULDBLOCK))
1875 goto exit;
1876
1877 /* Just entered SS_CONNECTING state; the only
1878 * difference is that return value in non-blocking
1879 * case is EINPROGRESS, rather than EALREADY.
1880 */
1881 res = -EINPROGRESS;
1882 case SS_CONNECTING:
1883 if (previous == SS_CONNECTING)
1884 res = -EALREADY;
1885 if (!timeout)
1886 goto exit;
1887 timeout = msecs_to_jiffies(timeout);
1888 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1889 res = tipc_wait_for_connect(sock, &timeout);
1890 break;
1891 case SS_CONNECTED:
1892 res = -EISCONN;
1893 break;
1894 default:
1895 res = -EINVAL;
1896 break;
1897 }
1898 exit:
1899 release_sock(sk);
1900 return res;
1901 }
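/* Illustrative user-space sketch (not part of this file's build): a
 * caller typically reaches tipc_connect() via connect() on a TIPC
 * SOCK_SEQPACKET socket, addressing the peer by service name. The
 * type/instance values below are hypothetical.
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 1000, .instance = 1 },
 *	};
 *	int fd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
 *
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");
 */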
1902
1903 /**
1904 * tipc_listen - allow socket to listen for incoming connections
1905 * @sock: socket structure
1906 * @len: (unused)
1907 *
1908 * Returns 0 on success, errno otherwise
1909 */
1910 static int tipc_listen(struct socket *sock, int len)
1911 {
1912 struct sock *sk = sock->sk;
1913 int res;
1914
1915 lock_sock(sk);
1916
1917 if (sock->state != SS_UNCONNECTED)
1918 res = -EINVAL;
1919 else {
1920 sock->state = SS_LISTENING;
1921 res = 0;
1922 }
1923
1924 release_sock(sk);
1925 return res;
1926 }
1927
1928 static int tipc_wait_for_accept(struct socket *sock, long timeo)
1929 {
1930 struct sock *sk = sock->sk;
1931 DEFINE_WAIT(wait);
1932 int err;
1933
1934 /* True wake-one mechanism for incoming connections: only
1935 * one process gets woken up, not the 'whole herd'.
1936 * Since we do not 'race & poll' for established sockets
1937 * anymore, the common case will execute the loop only once.
1938 */
1939 for (;;) {
1940 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
1941 TASK_INTERRUPTIBLE);
1942 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1943 release_sock(sk);
1944 timeo = schedule_timeout(timeo);
1945 lock_sock(sk);
1946 }
1947 err = 0;
1948 if (!skb_queue_empty(&sk->sk_receive_queue))
1949 break;
1950 err = -EINVAL;
1951 if (sock->state != SS_LISTENING)
1952 break;
1953 err = sock_intr_errno(timeo);
1954 if (signal_pending(current))
1955 break;
1956 err = -EAGAIN;
1957 if (!timeo)
1958 break;
1959 }
1960 finish_wait(sk_sleep(sk), &wait);
1961 return err;
1962 }
1963
1964 /**
1965 * tipc_accept - wait for connection request
1966 * @sock: listening socket
1967 * @new_sock: new socket that is to be connected
1968 * @flags: file-related flags associated with socket
1969 *
1970 * Returns 0 on success, errno otherwise
1971 */
1972 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1973 {
1974 struct sock *new_sk, *sk = sock->sk;
1975 struct sk_buff *buf;
1976 struct tipc_sock *new_tsock;
1977 struct tipc_msg *msg;
1978 long timeo;
1979 int res;
1980
1981 lock_sock(sk);
1982
1983 if (sock->state != SS_LISTENING) {
1984 res = -EINVAL;
1985 goto exit;
1986 }
1987 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1988 res = tipc_wait_for_accept(sock, timeo);
1989 if (res)
1990 goto exit;
1991
1992 buf = skb_peek(&sk->sk_receive_queue);
1993
1994 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
1995 if (res)
1996 goto exit;
1997
1998 new_sk = new_sock->sk;
1999 new_tsock = tipc_sk(new_sk);
2000 msg = buf_msg(buf);
2001
2002 /* we lock on new_sk; but lockdep sees the lock on sk */
2003 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2004
2005 /*
2006 * Reject any stray messages received by new socket
2007 * before the socket lock was taken (very, very unlikely)
2008 */
2009 tsk_rej_rx_queue(new_sk);
2010
2011 /* Connect new socket to its peer */
2012 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2013 new_sock->state = SS_CONNECTED;
2014
2015 tsk_set_importance(new_tsock, msg_importance(msg));
2016 if (msg_named(msg)) {
2017 new_tsock->conn_type = msg_nametype(msg);
2018 new_tsock->conn_instance = msg_nameinst(msg);
2019 }
2020
2021 /*
2022 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2023 * Respond to 'SYN+' by queuing it on new socket.
2024 */
2025 if (!msg_data_sz(msg)) {
2026 struct msghdr m = {NULL,};
2027
2028 tsk_advance_rx_queue(sk);
2029 tipc_send_packet(NULL, new_sock, &m, 0);
2030 } else {
2031 __skb_dequeue(&sk->sk_receive_queue);
2032 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2033 skb_set_owner_r(buf, new_sk);
2034 }
2035 release_sock(new_sk);
2036 exit:
2037 release_sock(sk);
2038 return res;
2039 }
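/* Illustrative user-space sketch (not part of this file's build): the
 * path above is reached through plain accept() on a listening TIPC
 * socket; with O_NONBLOCK set and no queued 'SYN' it fails with EAGAIN.
 *
 *	int conn = accept(listen_fd, NULL, NULL);
 *
 *	if (conn < 0)
 *		perror("accept");
 */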
2040
2041 /**
2042 * tipc_shutdown - shutdown socket connection
2043 * @sock: socket structure
2044 * @how: direction to close (must be SHUT_RDWR)
2045 *
2046 * Terminates connection (if necessary), then purges socket's receive queue.
2047 *
2048 * Returns 0 on success, errno otherwise
2049 */
2050 static int tipc_shutdown(struct socket *sock, int how)
2051 {
2052 struct sock *sk = sock->sk;
2053 struct net *net = sock_net(sk);
2054 struct tipc_sock *tsk = tipc_sk(sk);
2055 struct sk_buff *skb;
2056 u32 dnode;
2057 int res;
2058
2059 if (how != SHUT_RDWR)
2060 return -EINVAL;
2061
2062 lock_sock(sk);
2063
2064 switch (sock->state) {
2065 case SS_CONNECTING:
2066 case SS_CONNECTED:
2067
2068 restart:
2069 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
2070 skb = __skb_dequeue(&sk->sk_receive_queue);
2071 if (skb) {
2072 if (TIPC_SKB_CB(skb)->handle != NULL) {
2073 kfree_skb(skb);
2074 goto restart;
2075 }
2076 if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
2077 tipc_link_xmit_skb(net, skb, dnode,
2078 tsk->portid);
2079 tipc_node_remove_conn(net, dnode, tsk->portid);
2080 } else {
2081 dnode = tsk_peer_node(tsk);
2082 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2083 TIPC_CONN_MSG, SHORT_H_SIZE,
2084 0, dnode, tipc_own_addr,
2085 tsk_peer_port(tsk),
2086 tsk->portid, TIPC_CONN_SHUTDOWN);
2087 tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
2088 }
2089 tsk->connected = 0;
2090 sock->state = SS_DISCONNECTING;
2091 tipc_node_remove_conn(net, dnode, tsk->portid);
2092 /* fall through */
2093
2094 case SS_DISCONNECTING:
2095
2096 /* Discard any unreceived messages */
2097 __skb_queue_purge(&sk->sk_receive_queue);
2098
2099 /* Wake up anyone sleeping in poll */
2100 sk->sk_state_change(sk);
2101 res = 0;
2102 break;
2103
2104 default:
2105 res = -ENOTCONN;
2106 }
2107
2108 release_sock(sk);
2109 return res;
2110 }
2111
2112 static void tipc_sk_timeout(unsigned long data)
2113 {
2114 struct tipc_sock *tsk = (struct tipc_sock *)data;
2115 struct sock *sk = &tsk->sk;
2116 struct sk_buff *skb = NULL;
2117 u32 peer_port, peer_node;
2118
2119 bh_lock_sock(sk);
2120 if (!tsk->connected) {
2121 bh_unlock_sock(sk);
2122 goto exit;
2123 }
2124 peer_port = tsk_peer_port(tsk);
2125 peer_node = tsk_peer_node(tsk);
2126
2127 if (tsk->probing_state == TIPC_CONN_PROBING) {
2128 /* Previous probe not answered -> self abort */
2129 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
2130 SHORT_H_SIZE, 0, tipc_own_addr,
2131 peer_node, tsk->portid, peer_port,
2132 TIPC_ERR_NO_PORT);
2133 } else {
2134 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
2135 0, peer_node, tipc_own_addr,
2136 peer_port, tsk->portid, TIPC_OK);
2137 tsk->probing_state = TIPC_CONN_PROBING;
2138 if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv))
2139 sock_hold(sk);
2140 }
2141 bh_unlock_sock(sk);
2142 if (skb)
2143 tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2144 exit:
2145 sock_put(sk);
2146 }
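/* Note on the timer above: each expiry either sends a CONN_PROBE toward
 * the peer and rearms the probe interval, or, if the previous probe is
 * still unanswered, fabricates a TIPC_ERR_NO_PORT message so that the
 * connection aborts. A connection thus survives at most one missed
 * probe interval.
 */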
2147
2148 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2149 struct tipc_name_seq const *seq)
2150 {
2151 struct net *net = sock_net(&tsk->sk);
2152 struct publication *publ;
2153 u32 key;
2154
2155 if (tsk->connected)
2156 return -EINVAL;
2157 key = tsk->portid + tsk->pub_count + 1;
2158 if (key == tsk->portid)
2159 return -EADDRINUSE;
2160
2161 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2162 scope, tsk->portid, key);
2163 if (unlikely(!publ))
2164 return -EINVAL;
2165
2166 list_add(&publ->pport_list, &tsk->publications);
2167 tsk->pub_count++;
2168 tsk->published = 1;
2169 return 0;
2170 }
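/* Illustrative user-space sketch (not part of this file's build):
 * publication is normally triggered by bind()ing a name sequence; the
 * type/range values below are hypothetical.
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 1000, .lower = 1, .upper = 10 },
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */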
2171
2172 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2173 struct tipc_name_seq const *seq)
2174 {
2175 struct net *net = sock_net(&tsk->sk);
2176 struct publication *publ;
2177 struct publication *safe;
2178 int rc = -EINVAL;
2179
2180 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2181 if (seq) {
2182 if (publ->scope != scope)
2183 continue;
2184 if (publ->type != seq->type)
2185 continue;
2186 if (publ->lower != seq->lower)
2187 continue;
2188 if (publ->upper != seq->upper)
2189 break;
2190 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2191 publ->ref, publ->key);
2192 rc = 0;
2193 break;
2194 }
2195 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2196 publ->ref, publ->key);
2197 rc = 0;
2198 }
2199 if (list_empty(&tsk->publications))
2200 tsk->published = 0;
2201 return rc;
2202 }
2203
2204 static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
2205 int len, int full_id)
2206 {
2207 struct publication *publ;
2208 int ret;
2209
2210 if (full_id)
2211 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
2212 tipc_zone(tipc_own_addr),
2213 tipc_cluster(tipc_own_addr),
2214 tipc_node(tipc_own_addr), tsk->portid);
2215 else
2216 ret = tipc_snprintf(buf, len, "%-10u:", tsk->portid);
2217
2218 if (tsk->connected) {
2219 u32 dport = tsk_peer_port(tsk);
2220 u32 destnode = tsk_peer_node(tsk);
2221
2222 ret += tipc_snprintf(buf + ret, len - ret,
2223 " connected to <%u.%u.%u:%u>",
2224 tipc_zone(destnode),
2225 tipc_cluster(destnode),
2226 tipc_node(destnode), dport);
2227 if (tsk->conn_type != 0)
2228 ret += tipc_snprintf(buf + ret, len - ret,
2229 " via {%u,%u}", tsk->conn_type,
2230 tsk->conn_instance);
2231 } else if (tsk->published) {
2232 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
2233 list_for_each_entry(publ, &tsk->publications, pport_list) {
2234 if (publ->lower == publ->upper)
2235 ret += tipc_snprintf(buf + ret, len - ret,
2236 " {%u,%u}", publ->type,
2237 publ->lower);
2238 else
2239 ret += tipc_snprintf(buf + ret, len - ret,
2240 " {%u,%u,%u}", publ->type,
2241 publ->lower, publ->upper);
2242 }
2243 }
2244 ret += tipc_snprintf(buf + ret, len - ret, "\n");
2245 return ret;
2246 }
2247
2248 struct sk_buff *tipc_sk_socks_show(void)
2249 {
2250 const struct bucket_table *tbl;
2251 struct rhash_head *pos;
2252 struct sk_buff *buf;
2253 struct tlv_desc *rep_tlv;
2254 char *pb;
2255 int pb_len;
2256 struct tipc_sock *tsk;
2257 int str_len = 0;
2258 int i;
2259
2260 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2261 if (!buf)
2262 return NULL;
2263 rep_tlv = (struct tlv_desc *)buf->data;
2264 pb = TLV_DATA(rep_tlv);
2265 pb_len = ULTRA_STRING_MAX_LEN;
2266
2267 rcu_read_lock();
2268 tbl = rht_dereference_rcu((&tipc_sk_rht)->tbl, &tipc_sk_rht);
2269 for (i = 0; i < tbl->size; i++) {
2270 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2271 spin_lock_bh(&tsk->sk.sk_lock.slock);
2272 str_len += tipc_sk_show(tsk, pb + str_len,
2273 pb_len - str_len, 0);
2274 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2275 }
2276 }
2277 rcu_read_unlock();
2278
2279 str_len += 1; /* for "\0" */
2280 skb_put(buf, TLV_SPACE(str_len));
2281 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2282
2283 return buf;
2284 }
2285
2286 /* tipc_sk_reinit: set non-zero address in all existing sockets
2287 * when we go from standalone to network mode.
2288 */
2289 void tipc_sk_reinit(void)
2290 {
2291 const struct bucket_table *tbl;
2292 struct rhash_head *pos;
2293 struct tipc_sock *tsk;
2294 struct tipc_msg *msg;
2295 int i;
2296
2297 rcu_read_lock();
2298 tbl = rht_dereference_rcu((&tipc_sk_rht)->tbl, &tipc_sk_rht);
2299 for (i = 0; i < tbl->size; i++) {
2300 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2301 spin_lock_bh(&tsk->sk.sk_lock.slock);
2302 msg = &tsk->phdr;
2303 msg_set_prevnode(msg, tipc_own_addr);
2304 msg_set_orignode(msg, tipc_own_addr);
2305 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2306 }
2307 }
2308 rcu_read_unlock();
2309 }
2310
2311 static struct tipc_sock *tipc_sk_lookup(u32 portid)
2312 {
2313 struct tipc_sock *tsk;
2314
2315 rcu_read_lock();
2316 tsk = rhashtable_lookup(&tipc_sk_rht, &portid);
2317 if (tsk)
2318 sock_hold(&tsk->sk);
2319 rcu_read_unlock();
2320
2321 return tsk;
2322 }
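/* Note: tipc_sk_lookup() returns with a reference held on the socket;
 * every caller must release it again with sock_put().
 */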
2323
2324 static int tipc_sk_insert(struct tipc_sock *tsk)
2325 {
2326 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2327 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2328
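/* Probe linearly from a random starting point, trying each port id in
 * [TIPC_MIN_PORT, TIPC_MAX_PORT] at most once and wrapping at the end
 * of the range.
 */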
2329 while (remaining--) {
2330 portid++;
2331 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2332 portid = TIPC_MIN_PORT;
2333 tsk->portid = portid;
2334 sock_hold(&tsk->sk);
2335 if (rhashtable_lookup_insert(&tipc_sk_rht, &tsk->node))
2336 return 0;
2337 sock_put(&tsk->sk);
2338 }
2339
2340 return -1;
2341 }
2342
2343 static void tipc_sk_remove(struct tipc_sock *tsk)
2344 {
2345 struct sock *sk = &tsk->sk;
2346
2347 if (rhashtable_remove(&tipc_sk_rht, &tsk->node)) {
2348 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2349 __sock_put(sk);
2350 }
2351 }
2352
2353 int tipc_sk_rht_init(void)
2354 {
2355 struct rhashtable_params rht_params = {
2356 .nelem_hint = 192,
2357 .head_offset = offsetof(struct tipc_sock, node),
2358 .key_offset = offsetof(struct tipc_sock, portid),
2359 .key_len = sizeof(u32), /* portid */
2360 .hashfn = jhash,
2361 .max_shift = 20, /* 1M */
2362 .min_shift = 8, /* 256 */
2363 .grow_decision = rht_grow_above_75,
2364 .shrink_decision = rht_shrink_below_30,
2365 };
2366
2367 return rhashtable_init(&tipc_sk_rht, &rht_params);
2368 }
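/* Note on the table geometry above: the hash table is sized for ~192
 * sockets initially, grows when occupancy exceeds 75%, shrinks when it
 * drops below 30%, and is clamped between 2^8 and 2^20 buckets.
 */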
2369
2370 void tipc_sk_rht_destroy(void)
2371 {
2372 /* Wait for socket readers to complete */
2373 synchronize_net();
2374
2375 rhashtable_destroy(&tipc_sk_rht);
2376 }
2377
2378 /**
2379 * tipc_setsockopt - set socket option
2380 * @sock: socket structure
2381 * @lvl: option level
2382 * @opt: option identifier
2383 * @ov: pointer to new option value
2384 * @ol: length of option value
2385 *
2386 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2387 * (to ease compatibility).
2388 *
2389 * Returns 0 on success, errno otherwise
2390 */
2391 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2392 char __user *ov, unsigned int ol)
2393 {
2394 struct sock *sk = sock->sk;
2395 struct tipc_sock *tsk = tipc_sk(sk);
2396 u32 value;
2397 int res;
2398
2399 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2400 return 0;
2401 if (lvl != SOL_TIPC)
2402 return -ENOPROTOOPT;
2403 if (ol < sizeof(value))
2404 return -EINVAL;
2405 res = get_user(value, (u32 __user *)ov);
2406 if (res)
2407 return res;
2408
2409 lock_sock(sk);
2410
2411 switch (opt) {
2412 case TIPC_IMPORTANCE:
2413 res = tsk_set_importance(tsk, value);
2414 break;
2415 case TIPC_SRC_DROPPABLE:
2416 if (sock->type != SOCK_STREAM)
2417 tsk_set_unreliable(tsk, value);
2418 else
2419 res = -ENOPROTOOPT;
2420 break;
2421 case TIPC_DEST_DROPPABLE:
2422 tsk_set_unreturnable(tsk, value);
2423 break;
2424 case TIPC_CONN_TIMEOUT:
2425 tipc_sk(sk)->conn_timeout = value;
2426 /* no need to set "res", since already 0 at this point */
2427 break;
2428 default:
2429 res = -EINVAL;
2430 }
2431
2432 release_sock(sk);
2433
2434 return res;
2435 }
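/* Illustrative user-space sketch (not part of this file's build):
 * setting the connect timeout handled above; 8000 ms is an example
 * value.
 *
 *	unsigned int tmo = 8000;
 *
 *	setsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &tmo, sizeof(tmo));
 */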
2436
2437 /**
2438 * tipc_getsockopt - get socket option
2439 * @sock: socket structure
2440 * @lvl: option level
2441 * @opt: option identifier
2442 * @ov: receptacle for option value
2443 * @ol: receptacle for length of option value
2444 *
2445 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2446 * (to ease compatibility).
2447 *
2448 * Returns 0 on success, errno otherwise
2449 */
2450 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2451 char __user *ov, int __user *ol)
2452 {
2453 struct sock *sk = sock->sk;
2454 struct tipc_sock *tsk = tipc_sk(sk);
2455 int len;
2456 u32 value;
2457 int res;
2458
2459 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2460 return put_user(0, ol);
2461 if (lvl != SOL_TIPC)
2462 return -ENOPROTOOPT;
2463 res = get_user(len, ol);
2464 if (res)
2465 return res;
2466
2467 lock_sock(sk);
2468
2469 switch (opt) {
2470 case TIPC_IMPORTANCE:
2471 value = tsk_importance(tsk);
2472 break;
2473 case TIPC_SRC_DROPPABLE:
2474 value = tsk_unreliable(tsk);
2475 break;
2476 case TIPC_DEST_DROPPABLE:
2477 value = tsk_unreturnable(tsk);
2478 break;
2479 case TIPC_CONN_TIMEOUT:
2480 value = tsk->conn_timeout;
2481 /* no need to set "res", since already 0 at this point */
2482 break;
2483 case TIPC_NODE_RECVQ_DEPTH:
2484 value = 0; /* was tipc_queue_size, now obsolete */
2485 break;
2486 case TIPC_SOCK_RECVQ_DEPTH:
2487 value = skb_queue_len(&sk->sk_receive_queue);
2488 break;
2489 default:
2490 res = -EINVAL;
2491 }
2492
2493 release_sock(sk);
2494
2495 if (res)
2496 return res; /* "get" failed */
2497
2498 if (len < sizeof(value))
2499 return -EINVAL;
2500
2501 if (copy_to_user(ov, &value, sizeof(value)))
2502 return -EFAULT;
2503
2504 return put_user(sizeof(value), ol);
2505 }
2506
2507 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2508 {
2509 struct sock *sk = sock->sk;
2510 struct tipc_sioc_ln_req lnr;
2511 void __user *argp = (void __user *)arg;
2512
2513 switch (cmd) {
2514 case SIOCGETLINKNAME:
2515 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2516 return -EFAULT;
2517 if (!tipc_node_get_linkname(sock_net(sk),
2518 lnr.bearer_id & 0xffff, lnr.peer,
2519 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2520 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2521 return -EFAULT;
2522 return 0;
2523 }
2524 return -EADDRNOTAVAIL;
2525 default:
2526 return -ENOIOCTLCMD;
2527 }
2528 }
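/* Illustrative user-space sketch (not part of this file's build):
 * resolving a link name via the SIOCGETLINKNAME ioctl handled above;
 * the peer address and bearer id are hypothetical.
 *
 *	struct tipc_sioc_ln_req req = { .peer = peer_addr, .bearer_id = 0 };
 *
 *	if (!ioctl(fd, SIOCGETLINKNAME, &req))
 *		printf("link: %s\n", req.linkname);
 */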
2529
2530 /* Protocol switches for the various types of TIPC sockets */
2531
2532 static const struct proto_ops msg_ops = {
2533 .owner = THIS_MODULE,
2534 .family = AF_TIPC,
2535 .release = tipc_release,
2536 .bind = tipc_bind,
2537 .connect = tipc_connect,
2538 .socketpair = sock_no_socketpair,
2539 .accept = sock_no_accept,
2540 .getname = tipc_getname,
2541 .poll = tipc_poll,
2542 .ioctl = tipc_ioctl,
2543 .listen = sock_no_listen,
2544 .shutdown = tipc_shutdown,
2545 .setsockopt = tipc_setsockopt,
2546 .getsockopt = tipc_getsockopt,
2547 .sendmsg = tipc_sendmsg,
2548 .recvmsg = tipc_recvmsg,
2549 .mmap = sock_no_mmap,
2550 .sendpage = sock_no_sendpage
2551 };
2552
2553 static const struct proto_ops packet_ops = {
2554 .owner = THIS_MODULE,
2555 .family = AF_TIPC,
2556 .release = tipc_release,
2557 .bind = tipc_bind,
2558 .connect = tipc_connect,
2559 .socketpair = sock_no_socketpair,
2560 .accept = tipc_accept,
2561 .getname = tipc_getname,
2562 .poll = tipc_poll,
2563 .ioctl = tipc_ioctl,
2564 .listen = tipc_listen,
2565 .shutdown = tipc_shutdown,
2566 .setsockopt = tipc_setsockopt,
2567 .getsockopt = tipc_getsockopt,
2568 .sendmsg = tipc_send_packet,
2569 .recvmsg = tipc_recvmsg,
2570 .mmap = sock_no_mmap,
2571 .sendpage = sock_no_sendpage
2572 };
2573
2574 static const struct proto_ops stream_ops = {
2575 .owner = THIS_MODULE,
2576 .family = AF_TIPC,
2577 .release = tipc_release,
2578 .bind = tipc_bind,
2579 .connect = tipc_connect,
2580 .socketpair = sock_no_socketpair,
2581 .accept = tipc_accept,
2582 .getname = tipc_getname,
2583 .poll = tipc_poll,
2584 .ioctl = tipc_ioctl,
2585 .listen = tipc_listen,
2586 .shutdown = tipc_shutdown,
2587 .setsockopt = tipc_setsockopt,
2588 .getsockopt = tipc_getsockopt,
2589 .sendmsg = tipc_send_stream,
2590 .recvmsg = tipc_recv_stream,
2591 .mmap = sock_no_mmap,
2592 .sendpage = sock_no_sendpage
2593 };
2594
2595 static const struct net_proto_family tipc_family_ops = {
2596 .owner = THIS_MODULE,
2597 .family = AF_TIPC,
2598 .create = tipc_sk_create
2599 };
2600
2601 static struct proto tipc_proto = {
2602 .name = "TIPC",
2603 .owner = THIS_MODULE,
2604 .obj_size = sizeof(struct tipc_sock),
2605 .sysctl_rmem = sysctl_tipc_rmem
2606 };
2607
2608 static struct proto tipc_proto_kern = {
2609 .name = "TIPC",
2610 .obj_size = sizeof(struct tipc_sock),
2611 .sysctl_rmem = sysctl_tipc_rmem
2612 };
2613
2614 /**
2615 * tipc_socket_init - initialize TIPC socket interface
2616 *
2617 * Returns 0 on success, errno otherwise
2618 */
2619 int tipc_socket_init(void)
2620 {
2621 int res;
2622
2623 res = proto_register(&tipc_proto, 1);
2624 if (res) {
2625 pr_err("Failed to register TIPC protocol type\n");
2626 goto out;
2627 }
2628
2629 res = sock_register(&tipc_family_ops);
2630 if (res) {
2631 pr_err("Failed to register TIPC socket type\n");
2632 proto_unregister(&tipc_proto);
2633 goto out;
2634 }
2635 out:
2636 return res;
2637 }
2638
2639 /**
2640 * tipc_socket_stop - stop TIPC socket interface
2641 */
2642 void tipc_socket_stop(void)
2643 {
2644 sock_unregister(tipc_family_ops.family);
2645 proto_unregister(&tipc_proto);
2646 }
2647
2648 /* Caller should hold socket lock for the passed tipc socket. */
2649 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2650 {
2651 u32 peer_node;
2652 u32 peer_port;
2653 struct nlattr *nest;
2654
2655 peer_node = tsk_peer_node(tsk);
2656 peer_port = tsk_peer_port(tsk);
2657
2658 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2659
2660 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2661 goto msg_full;
2662 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2663 goto msg_full;
2664
2665 if (tsk->conn_type != 0) {
2666 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2667 goto msg_full;
2668 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2669 goto msg_full;
2670 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2671 goto msg_full;
2672 }
2673 nla_nest_end(skb, nest);
2674
2675 return 0;
2676
2677 msg_full:
2678 nla_nest_cancel(skb, nest);
2679
2680 return -EMSGSIZE;
2681 }
2682
2683 /* Caller should hold socket lock for the passed tipc socket. */
2684 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2685 struct tipc_sock *tsk)
2686 {
2687 int err;
2688 void *hdr;
2689 struct nlattr *attrs;
2690
2691 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2692 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2693 if (!hdr)
2694 goto msg_cancel;
2695
2696 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2697 if (!attrs)
2698 goto genlmsg_cancel;
2699 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
2700 goto attr_msg_cancel;
2701 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
2702 goto attr_msg_cancel;
2703
2704 if (tsk->connected) {
2705 err = __tipc_nl_add_sk_con(skb, tsk);
2706 if (err)
2707 goto attr_msg_cancel;
2708 } else if (!list_empty(&tsk->publications)) {
2709 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2710 goto attr_msg_cancel;
2711 }
2712 nla_nest_end(skb, attrs);
2713 genlmsg_end(skb, hdr);
2714
2715 return 0;
2716
2717 attr_msg_cancel:
2718 nla_nest_cancel(skb, attrs);
2719 genlmsg_cancel:
2720 genlmsg_cancel(skb, hdr);
2721 msg_cancel:
2722 return -EMSGSIZE;
2723 }
2724
2725 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2726 {
2727 int err;
2728 struct tipc_sock *tsk;
2729 const struct bucket_table *tbl;
2730 struct rhash_head *pos;
2731 u32 prev_portid = cb->args[0];
2732 u32 portid = prev_portid;
2733 int i;
2734
2735 rcu_read_lock();
2736 tbl = rht_dereference_rcu((&tipc_sk_rht)->tbl, &tipc_sk_rht);
2737 for (i = 0; i < tbl->size; i++) {
2738 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2739 spin_lock_bh(&tsk->sk.sk_lock.slock);
2740 portid = tsk->portid;
2741 err = __tipc_nl_add_sk(skb, cb, tsk);
2742 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2743 if (err)
2744 break;
2745
2746 prev_portid = portid;
2747 }
2748 }
2749 rcu_read_unlock();
2750
2751 cb->args[0] = prev_portid;
2752
2753 return skb->len;
2754 }
2755
2756 /* Caller should hold socket lock for the passed tipc socket. */
2757 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2758 struct netlink_callback *cb,
2759 struct publication *publ)
2760 {
2761 void *hdr;
2762 struct nlattr *attrs;
2763
2764 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2765 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2766 if (!hdr)
2767 goto msg_cancel;
2768
2769 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2770 if (!attrs)
2771 goto genlmsg_cancel;
2772
2773 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2774 goto attr_msg_cancel;
2775 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2776 goto attr_msg_cancel;
2777 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2778 goto attr_msg_cancel;
2779 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2780 goto attr_msg_cancel;
2781
2782 nla_nest_end(skb, attrs);
2783 genlmsg_end(skb, hdr);
2784
2785 return 0;
2786
2787 attr_msg_cancel:
2788 nla_nest_cancel(skb, attrs);
2789 genlmsg_cancel:
2790 genlmsg_cancel(skb, hdr);
2791 msg_cancel:
2792 return -EMSGSIZE;
2793 }
2794
2795 /* Caller should hold socket lock for the passed tipc socket. */
2796 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2797 struct netlink_callback *cb,
2798 struct tipc_sock *tsk, u32 *last_publ)
2799 {
2800 int err;
2801 struct publication *p;
2802
2803 if (*last_publ) {
2804 list_for_each_entry(p, &tsk->publications, pport_list) {
2805 if (p->key == *last_publ)
2806 break;
2807 }
2808 if (p->key != *last_publ) {
2809 /* We never set seq or call nl_dump_check_consistent(),
2810 * so setting prev_seq here will cause the consistency
2811 * check to fail in the netlink callback handler,
2812 * resulting in the last NLMSG_DONE message having the
2813 * NLM_F_DUMP_INTR flag set.
2814 */
2815 cb->prev_seq = 1;
2816 *last_publ = 0;
2817 return -EPIPE;
2818 }
2819 } else {
2820 p = list_first_entry(&tsk->publications, struct publication,
2821 pport_list);
2822 }
2823
2824 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2825 err = __tipc_nl_add_sk_publ(skb, cb, p);
2826 if (err) {
2827 *last_publ = p->key;
2828 return err;
2829 }
2830 }
2831 *last_publ = 0;
2832
2833 return 0;
2834 }
2835
2836 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2837 {
2838 int err;
2839 u32 tsk_portid = cb->args[0];
2840 u32 last_publ = cb->args[1];
2841 u32 done = cb->args[2];
2842 struct tipc_sock *tsk;
2843
2844 if (!tsk_portid) {
2845 struct nlattr **attrs;
2846 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2847
2848 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2849 if (err)
2850 return err;
2851
2852 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2853 attrs[TIPC_NLA_SOCK],
2854 tipc_nl_sock_policy);
2855 if (err)
2856 return err;
2857
2858 if (!sock[TIPC_NLA_SOCK_REF])
2859 return -EINVAL;
2860
2861 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2862 }
2863
2864 if (done)
2865 return 0;
2866
2867 tsk = tipc_sk_lookup(tsk_portid);
2868 if (!tsk)
2869 return -EINVAL;
2870
2871 lock_sock(&tsk->sk);
2872 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2873 if (!err)
2874 done = 1;
2875 release_sock(&tsk->sk);
2876 sock_put(&tsk->sk);
2877
2878 cb->args[0] = tsk_portid;
2879 cb->args[1] = last_publ;
2880 cb->args[2] = done;
2881
2882 return skb->len;
2883 }