tipc: feed tipc sock pointer to tipc_sk_timeout routine
[deliverable/linux.git] / net / tipc / socket.c
1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/rhashtable.h>
38 #include <linux/jhash.h>
39 #include "core.h"
40 #include "name_table.h"
41 #include "node.h"
42 #include "link.h"
43 #include "config.h"
44 #include "socket.h"
45
46 #define SS_LISTENING -1 /* socket is listening */
47 #define SS_READY -2 /* socket is connectionless */
48
49 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
50 #define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
51 #define TIPC_FWD_MSG 1
52 #define TIPC_CONN_OK 0
53 #define TIPC_CONN_PROBING 1
54 #define TIPC_MAX_PORT 0xffffffff
55 #define TIPC_MIN_PORT 1
56
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @sock_list: adjacent sockets in TIPC's global list of sockets
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @probing_state: TIPC_CONN_OK or TIPC_CONN_PROBING; connection supervision
 * @probing_intv: interval between connection probes, in jiffies
 * @timer: connection probing / connect timeout timer
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @link_cong: non-zero if owner must sleep because of link congestion
 * @sent_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @node: hash table node
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	int connected;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head sock_list;
	struct list_head publications;
	u32 pub_count;
	u32 probing_state;
	unsigned long probing_intv;
	struct timer_list timer;
	uint conn_timeout;
	atomic_t dupl_rcvcnt;
	bool link_cong;
	uint sent_unacked;
	uint rcv_unacked;
	struct rhash_head node;
	struct rcu_head rcu;
};
106
107 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
108 static void tipc_data_ready(struct sock *sk);
109 static void tipc_write_space(struct sock *sk);
110 static int tipc_release(struct socket *sock);
111 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
112 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
113 static void tipc_sk_timeout(unsigned long data);
114 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
115 struct tipc_name_seq const *seq);
116 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
117 struct tipc_name_seq const *seq);
118 static struct tipc_sock *tipc_sk_lookup(u32 portid);
119 static int tipc_sk_insert(struct tipc_sock *tsk);
120 static void tipc_sk_remove(struct tipc_sock *tsk);
121
122 static const struct proto_ops packet_ops;
123 static const struct proto_ops stream_ops;
124 static const struct proto_ops msg_ops;
125
126 static struct proto tipc_proto;
127 static struct proto tipc_proto_kern;
128
/* Netlink attribute policy for TIPC socket query/dump attributes */
static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
	[TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
	[TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
	[TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
	[TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
};
136
137 /*
138 * Revised TIPC socket locking policy:
139 *
140 * Most socket operations take the standard socket lock when they start
141 * and hold it until they finish (or until they need to sleep). Acquiring
142 * this lock grants the owner exclusive access to the fields of the socket
143 * data structures, with the exception of the backlog queue. A few socket
144 * operations can be done without taking the socket lock because they only
145 * read socket information that never changes during the life of the socket.
146 *
147 * Socket operations may acquire the lock for the associated TIPC port if they
148 * need to perform an operation on the port. If any routine needs to acquire
149 * both the socket lock and the port lock it must take the socket lock first
150 * to avoid the risk of deadlock.
151 *
152 * The dispatcher handling incoming messages cannot grab the socket lock in
153 * the standard fashion, since invoked it runs at the BH level and cannot block.
154 * Instead, it checks to see if the socket lock is currently owned by someone,
155 * and either handles the message itself or adds it to the socket's backlog
156 * queue; in the latter case the queued message is processed once the process
157 * owning the socket lock releases it.
158 *
159 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
160 * the problem of a blocked socket operation preventing any other operations
161 * from occurring. However, applications must be careful if they have
162 * multiple threads trying to send (or receive) on the same socket, as these
163 * operations might interfere with each other. For example, doing a connect
164 * and a receive at the same time might allow the receive to consume the
165 * ACK message meant for the connect. While additional work could be done
166 * to try and overcome this, it doesn't seem to be worthwhile at the present.
167 *
168 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
169 * that another operation that must be performed in a non-blocking manner is
170 * not delayed for very long because the lock has already been taken.
171 *
172 * NOTE: This code assumes that certain fields of a port/socket pair are
173 * constant over its lifetime; such fields can be examined without taking
174 * the socket lock and/or port lock, and do not need to be re-read even
175 * after resuming processing after waiting. These fields include:
176 * - socket type
177 * - pointer to socket sk structure (aka tipc_sock structure)
178 * - pointer to port structure
179 * - port reference
180 */
181
182 /* Protects tipc socket hash table mutations */
183 static struct rhashtable tipc_sk_rht;
184
185 static u32 tsk_peer_node(struct tipc_sock *tsk)
186 {
187 return msg_destnode(&tsk->phdr);
188 }
189
190 static u32 tsk_peer_port(struct tipc_sock *tsk)
191 {
192 return msg_destport(&tsk->phdr);
193 }
194
195 static bool tsk_unreliable(struct tipc_sock *tsk)
196 {
197 return msg_src_droppable(&tsk->phdr) != 0;
198 }
199
200 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
201 {
202 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
203 }
204
205 static bool tsk_unreturnable(struct tipc_sock *tsk)
206 {
207 return msg_dest_droppable(&tsk->phdr) != 0;
208 }
209
210 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
211 {
212 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
213 }
214
215 static int tsk_importance(struct tipc_sock *tsk)
216 {
217 return msg_importance(&tsk->phdr);
218 }
219
220 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
221 {
222 if (imp > TIPC_CRITICAL_IMPORTANCE)
223 return -EINVAL;
224 msg_set_importance(&tsk->phdr, (u32)imp);
225 return 0;
226 }
227
/* tipc_sk - convert a generic struct sock pointer to the enclosing
 * struct tipc_sock (sk is the first member, embedded by value)
 */
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}
232
233 static int tsk_conn_cong(struct tipc_sock *tsk)
234 {
235 return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
236 }
237
238 /**
239 * tsk_advance_rx_queue - discard first buffer in socket receive queue
240 *
241 * Caller must hold socket lock
242 */
243 static void tsk_advance_rx_queue(struct sock *sk)
244 {
245 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
246 }
247
248 /**
249 * tsk_rej_rx_queue - reject all buffers in socket receive queue
250 *
251 * Caller must hold socket lock
252 */
253 static void tsk_rej_rx_queue(struct sock *sk)
254 {
255 struct sk_buff *skb;
256 u32 dnode;
257
258 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
259 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
260 tipc_link_xmit_skb(skb, dnode, 0);
261 }
262 }
263
264 /* tsk_peer_msg - verify if message was sent by connected port's peer
265 *
266 * Handles cases where the node's network address has changed from
267 * the default of <0.0.0> to its configured setting.
268 */
269 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
270 {
271 u32 peer_port = tsk_peer_port(tsk);
272 u32 orig_node;
273 u32 peer_node;
274
275 if (unlikely(!tsk->connected))
276 return false;
277
278 if (unlikely(msg_origport(msg) != peer_port))
279 return false;
280
281 orig_node = msg_orignode(msg);
282 peer_node = tsk_peer_node(tsk);
283
284 if (likely(orig_node == peer_node))
285 return true;
286
287 if (!orig_node && (peer_node == tipc_own_addr))
288 return true;
289
290 if (!peer_node && (orig_node == tipc_own_addr))
291 return true;
292
293 return false;
294 }
295
296 /**
297 * tipc_sk_create - create a TIPC socket
298 * @net: network namespace (must be default network)
299 * @sock: pre-allocated socket structure
300 * @protocol: protocol indicator (must be 0)
301 * @kern: caused by kernel or by userspace?
302 *
303 * This routine creates additional data structures used by the TIPC socket,
304 * initializes them, and links them together.
305 *
306 * Returns 0 on success, errno otherwise
307 */
308 static int tipc_sk_create(struct net *net, struct socket *sock,
309 int protocol, int kern)
310 {
311 const struct proto_ops *ops;
312 socket_state state;
313 struct sock *sk;
314 struct tipc_sock *tsk;
315 struct tipc_msg *msg;
316
317 /* Validate arguments */
318 if (unlikely(protocol != 0))
319 return -EPROTONOSUPPORT;
320
321 switch (sock->type) {
322 case SOCK_STREAM:
323 ops = &stream_ops;
324 state = SS_UNCONNECTED;
325 break;
326 case SOCK_SEQPACKET:
327 ops = &packet_ops;
328 state = SS_UNCONNECTED;
329 break;
330 case SOCK_DGRAM:
331 case SOCK_RDM:
332 ops = &msg_ops;
333 state = SS_READY;
334 break;
335 default:
336 return -EPROTOTYPE;
337 }
338
339 /* Allocate socket's protocol area */
340 if (!kern)
341 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
342 else
343 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto_kern);
344
345 if (sk == NULL)
346 return -ENOMEM;
347
348 tsk = tipc_sk(sk);
349 tsk->max_pkt = MAX_PKT_DEFAULT;
350 INIT_LIST_HEAD(&tsk->publications);
351 msg = &tsk->phdr;
352 tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
353 NAMED_H_SIZE, 0);
354
355 /* Finish initializing socket data structures */
356 sock->ops = ops;
357 sock->state = state;
358 sock_init_data(sock, sk);
359 if (tipc_sk_insert(tsk)) {
360 pr_warn("Socket create failed; port numbrer exhausted\n");
361 return -EINVAL;
362 }
363 msg_set_origport(msg, tsk->portid);
364 setup_timer(&tsk->timer, tipc_sk_timeout, (unsigned long)tsk);
365 sk->sk_backlog_rcv = tipc_backlog_rcv;
366 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
367 sk->sk_data_ready = tipc_data_ready;
368 sk->sk_write_space = tipc_write_space;
369 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
370 tsk->sent_unacked = 0;
371 atomic_set(&tsk->dupl_rcvcnt, 0);
372
373 if (sock->state == SS_READY) {
374 tsk_set_unreturnable(tsk, true);
375 if (sock->type == SOCK_DGRAM)
376 tsk_set_unreliable(tsk, true);
377 }
378 return 0;
379 }
380
381 /**
382 * tipc_sock_create_local - create TIPC socket from inside TIPC module
383 * @type: socket type - SOCK_RDM or SOCK_SEQPACKET
384 *
385 * We cannot use sock_creat_kern here because it bumps module user count.
386 * Since socket owner and creator is the same module we must make sure
387 * that module count remains zero for module local sockets, otherwise
388 * we cannot do rmmod.
389 *
390 * Returns 0 on success, errno otherwise
391 */
392 int tipc_sock_create_local(int type, struct socket **res)
393 {
394 int rc;
395
396 rc = sock_create_lite(AF_TIPC, type, 0, res);
397 if (rc < 0) {
398 pr_err("Failed to create kernel socket\n");
399 return rc;
400 }
401 tipc_sk_create(&init_net, *res, 0, 1);
402
403 return 0;
404 }
405
/**
 * tipc_sock_release_local - release socket created by tipc_sock_create_local
 * @sock: the socket to be released.
 *
 * Module reference count is not incremented when such sockets are created,
 * so we must keep it from being decremented when they are released.
 */
void tipc_sock_release_local(struct socket *sock)
{
	/* Tear down TIPC state first, then clear ops so that the generic
	 * sock_release() below does not drop the (never-taken) module ref
	 */
	tipc_release(sock);
	sock->ops = NULL;
	sock_release(sock);
}
419
420 /**
421 * tipc_sock_accept_local - accept a connection on a socket created
422 * with tipc_sock_create_local. Use this function to avoid that
423 * module reference count is inadvertently incremented.
424 *
425 * @sock: the accepting socket
426 * @newsock: reference to the new socket to be created
427 * @flags: socket flags
428 */
429
430 int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
431 int flags)
432 {
433 struct sock *sk = sock->sk;
434 int ret;
435
436 ret = sock_create_lite(sk->sk_family, sk->sk_type,
437 sk->sk_protocol, newsock);
438 if (ret < 0)
439 return ret;
440
441 ret = tipc_accept(sock, *newsock, flags);
442 if (ret < 0) {
443 sock_release(*newsock);
444 return ret;
445 }
446 (*newsock)->ops = sock->ops;
447 return ret;
448 }
449
450 static void tipc_sk_callback(struct rcu_head *head)
451 {
452 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
453
454 sock_put(&tsk->sk);
455 }
456
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;
	struct sk_buff *skb;
	u32 dnode, probing_state;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	dnode = tsk_peer_node(tsk);
	while (sock->state != SS_DISCONNECTING) {
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL)
			break;
		/* A non-NULL handle appears to mark an internal buffer that
		 * must not be bounced back - TODO confirm against producers
		 */
		if (TIPC_SKB_CB(skb)->handle != NULL)
			kfree_skb(skb);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tsk->connected = 0;
				tipc_node_remove_conn(dnode, tsk->portid);
			}
			if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
				tipc_link_xmit_skb(skb, dnode, 0);
		}
	}

	tipc_sk_withdraw(tsk, 0, NULL);
	/* Snapshot probing_state BEFORE del_timer_sync(): a concurrently
	 * running timer handler may change it. If a pending (non-probing)
	 * timer was cancelled, drop the sock reference it was holding.
	 */
	probing_state = tsk->probing_state;
	if (del_timer_sync(&tsk->timer) && probing_state != TIPC_CONN_PROBING)
		sock_put(sk);
	tipc_sk_remove(tsk);
	if (tsk->connected) {
		/* Still connected: notify peer with an aborting conn msg */
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, dnode, tipc_own_addr,
				      tsk_peer_port(tsk),
				      tsk->portid, TIPC_ERR_NO_PORT);
		if (skb)
			tipc_link_xmit_skb(skb, dnode, tsk->portid);
		tipc_node_remove_conn(dnode, tsk->portid);
	}

	/* Discard any remaining (connection-based) messages in receive queue */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	/* Defer the final sock_put() past an RCU grace period so lockless
	 * hash-table readers cannot touch a freed socket
	 */
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
540
541 /**
542 * tipc_bind - associate or disassocate TIPC name(s) with a socket
543 * @sock: socket structure
544 * @uaddr: socket address describing name(s) and desired operation
545 * @uaddr_len: size of socket address data structure
546 *
547 * Name and name sequence binding is indicated using a positive scope value;
548 * a negative scope value unbinds the specified name. Specifying no name
549 * (i.e. a socket address length of 0) unbinds all names from the socket.
550 *
551 * Returns 0 on success, errno otherwise
552 *
553 * NOTE: This routine doesn't need to take the socket lock since it doesn't
554 * access any non-constant socket information.
555 */
556 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
557 int uaddr_len)
558 {
559 struct sock *sk = sock->sk;
560 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
561 struct tipc_sock *tsk = tipc_sk(sk);
562 int res = -EINVAL;
563
564 lock_sock(sk);
565 if (unlikely(!uaddr_len)) {
566 res = tipc_sk_withdraw(tsk, 0, NULL);
567 goto exit;
568 }
569
570 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
571 res = -EINVAL;
572 goto exit;
573 }
574 if (addr->family != AF_TIPC) {
575 res = -EAFNOSUPPORT;
576 goto exit;
577 }
578
579 if (addr->addrtype == TIPC_ADDR_NAME)
580 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
581 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
582 res = -EAFNOSUPPORT;
583 goto exit;
584 }
585
586 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
587 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
588 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
589 res = -EACCES;
590 goto exit;
591 }
592
593 res = (addr->scope > 0) ?
594 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
595 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
596 exit:
597 release_sock(sk);
598 return res;
599 }
600
601 /**
602 * tipc_getname - get port ID of socket or peer socket
603 * @sock: socket structure
604 * @uaddr: area for returned socket address
605 * @uaddr_len: area for returned length of socket address
606 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
607 *
608 * Returns 0 on success, errno otherwise
609 *
610 * NOTE: This routine doesn't need to take the socket lock since it only
611 * accesses socket information that is unchanging (or which changes in
612 * a completely predictable manner).
613 */
614 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
615 int *uaddr_len, int peer)
616 {
617 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
618 struct tipc_sock *tsk = tipc_sk(sock->sk);
619
620 memset(addr, 0, sizeof(*addr));
621 if (peer) {
622 if ((sock->state != SS_CONNECTED) &&
623 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
624 return -ENOTCONN;
625 addr->addr.id.ref = tsk_peer_port(tsk);
626 addr->addr.id.node = tsk_peer_node(tsk);
627 } else {
628 addr->addr.id.ref = tsk->portid;
629 addr->addr.id.node = tipc_own_addr;
630 }
631
632 *uaddr_len = sizeof(*addr);
633 addr->addrtype = TIPC_ADDR_ID;
634 addr->family = AF_TIPC;
635 addr->scope = 0;
636 addr->addr.name.domain = 0;
637
638 return 0;
639 }
640
641 /**
642 * tipc_poll - read and possibly block on pollmask
643 * @file: file structure associated with the socket
644 * @sock: socket for which to calculate the poll bits
645 * @wait: ???
646 *
647 * Returns pollmask value
648 *
649 * COMMENTARY:
650 * It appears that the usual socket locking mechanisms are not useful here
651 * since the pollmask info is potentially out-of-date the moment this routine
652 * exits. TCP and other protocols seem to rely on higher level poll routines
653 * to handle any preventable race conditions, so TIPC will do the same ...
654 *
655 * TIPC sets the returned events as follows:
656 *
657 * socket state flags set
658 * ------------ ---------
659 * unconnected no read flags
660 * POLLOUT if port is not congested
661 *
662 * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
663 * no write flags
664 *
665 * connected POLLIN/POLLRDNORM if data in rx queue
666 * POLLOUT if port is not congested
667 *
668 * disconnecting POLLIN/POLLRDNORM/POLLHUP
669 * no write flags
670 *
671 * listening POLLIN if SYN in rx queue
672 * no write flags
673 *
674 * ready POLLIN/POLLRDNORM if data in rx queue
675 * [connectionless] POLLOUT (since port cannot be congested)
676 *
677 * IMPORTANT: The fact that a read or write operation is indicated does NOT
678 * imply that the operation will succeed, merely that it should be performed
679 * and will not block.
680 */
681 static unsigned int tipc_poll(struct file *file, struct socket *sock,
682 poll_table *wait)
683 {
684 struct sock *sk = sock->sk;
685 struct tipc_sock *tsk = tipc_sk(sk);
686 u32 mask = 0;
687
688 sock_poll_wait(file, sk_sleep(sk), wait);
689
690 switch ((int)sock->state) {
691 case SS_UNCONNECTED:
692 if (!tsk->link_cong)
693 mask |= POLLOUT;
694 break;
695 case SS_READY:
696 case SS_CONNECTED:
697 if (!tsk->link_cong && !tsk_conn_cong(tsk))
698 mask |= POLLOUT;
699 /* fall thru' */
700 case SS_CONNECTING:
701 case SS_LISTENING:
702 if (!skb_queue_empty(&sk->sk_receive_queue))
703 mask |= (POLLIN | POLLRDNORM);
704 break;
705 case SS_DISCONNECTING:
706 mask = (POLLIN | POLLRDNORM | POLLHUP);
707 break;
708 }
709
710 return mask;
711 }
712
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dsz: total length of message data
 * @timeo: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dsz, long timeo)
{
	struct sock *sk = sock->sk;
	struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
	struct sk_buff_head head;
	uint mtu;
	int rc;

	/* Prepare the preformatted header for a name-sequence multicast */
	msg_set_type(mhdr, TIPC_MCAST_MSG);
	msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(mhdr, 0);
	msg_set_destnode(mhdr, 0);
	msg_set_nametype(mhdr, seq->type);
	msg_set_namelower(mhdr, seq->lower);
	msg_set_nameupper(mhdr, seq->upper);
	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);

new_mtu:
	/* (Re)fragment user data to the current broadcast link MTU */
	mtu = tipc_bclink_get_mtu();
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
	if (unlikely(rc < 0))
		return rc;

	do {
		rc = tipc_bclink_xmit(&head);
		if (likely(rc >= 0)) {
			rc = dsz;	/* report full payload length */
			break;
		}
		if (rc == -EMSGSIZE)
			goto new_mtu;	/* MTU changed; rebuild the chain */
		if (rc != -ELINKCONG)
			break;
		/* Broadcast link congested: sleep until wakeup/timeout,
		 * dropping the unsent chain if the wait fails
		 */
		tipc_sk(sk)->link_cong = 1;
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
	return rc;
}
766
/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
 */
void tipc_sk_mcast_rcv(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_port_list dports = {0, NULL, };
	struct tipc_port_list *item;
	struct sk_buff *b;
	uint i, last, dst = 0;
	u32 scope = TIPC_CLUSTER_SCOPE;

	/* Locally originated multicasts are matched node-wide only */
	if (in_own_node(msg_orignode(msg)))
		scope = TIPC_NODE_SCOPE;

	/* Create destination port list: */
	tipc_nametbl_mc_translate(msg_nametype(msg),
				  msg_namelower(msg),
				  msg_nameupper(msg),
				  scope,
				  &dports);
	last = dports.count;
	if (!last) {
		/* No matching subscribers; drop the buffer */
		kfree_skb(buf);
		return;
	}

	/* Walk the chunked port list (PLSIZE ports per chunk). Every
	 * destination but the last gets a clone; the last consumes the
	 * original buffer, so buf is never leaked or double-used.
	 */
	for (item = &dports; item; item = item->next) {
		for (i = 0; i < PLSIZE && ++dst <= last; i++) {
			b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
			if (!b) {
				pr_warn("Failed do clone mcast rcv buffer\n");
				continue;
			}
			msg_set_destport(msg, item->ports[i]);
			tipc_sk_rcv(b);
		}
	}
	tipc_port_list_free(&dports);
}
806
/**
 * tipc_sk_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @dnode: node to send response message to, if any
 * @buf: buffer containing protocol message
 * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
 * (CONN_PROBE_REPLY) message should be forwarded.
 */
static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
			     struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	int conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, msg))
		goto exit;

	/* Any valid peer message proves the connection is alive */
	tsk->probing_state = TIPC_CONN_OK;

	if (msg_type(msg) == CONN_ACK) {
		/* Credit back acked messages; wake a writer that was
		 * blocked on connection-level congestion
		 */
		conn_cong = tsk_conn_cong(tsk);
		tsk->sent_unacked -= msg_msgcnt(msg);
		if (conn_cong)
			tsk->sk.sk_write_space(&tsk->sk);
	} else if (msg_type(msg) == CONN_PROBE) {
		/* Turn the probe around; caller forwards it to *dnode.
		 * If the buffer cannot be reversed it has been consumed.
		 */
		if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
			return TIPC_OK;
		msg_set_type(msg, CONN_PROBE_REPLY);
		return TIPC_FWD_MSG;
	}
	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
exit:
	kfree_skb(buf);
	return TIPC_OK;
}
843
844 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
845 {
846 struct sock *sk = sock->sk;
847 struct tipc_sock *tsk = tipc_sk(sk);
848 DEFINE_WAIT(wait);
849 int done;
850
851 do {
852 int err = sock_error(sk);
853 if (err)
854 return err;
855 if (sock->state == SS_DISCONNECTING)
856 return -EPIPE;
857 if (!*timeo_p)
858 return -EAGAIN;
859 if (signal_pending(current))
860 return sock_intr_errno(*timeo_p);
861
862 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
863 done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
864 finish_wait(sk_sleep(sk), &wait);
865 } while (!done);
866 return 0;
867 }
868
/**
 * tipc_sendmsg - send message in connectionless manner
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	u32 dnode, dport;
	struct sk_buff_head head;
	struct sk_buff *skb;
	struct tipc_name_seq *seq = &dest->addr.nameseq;
	u32 mtu;
	long timeo;
	int rc;

	if (unlikely(!dest))
		return -EDESTADDRREQ;

	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;

	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	/* Connection-oriented sockets may only reach this path for the
	 * implied-connect 'SYN'; validate their state accordingly
	 */
	if (unlikely(sock->state != SS_READY)) {
		if (sock->state == SS_LISTENING) {
			rc = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			rc = -EISCONN;
			goto exit;
		}
		if (tsk->published) {
			rc = -EOPNOTSUPP;
			goto exit;
		}
		/* Remember the name used for the connection attempt */
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	if (dest->addrtype == TIPC_ADDR_MCAST) {
		rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
		goto exit;
	} else if (dest->addrtype == TIPC_ADDR_NAME) {
		u32 type = dest->addr.name.name.type;
		u32 inst = dest->addr.name.name.instance;
		u32 domain = dest->addr.name.domain;

		dnode = domain;
		msg_set_type(mhdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
		msg_set_nametype(mhdr, type);
		msg_set_nameinst(mhdr, inst);
		msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
		/* Resolve name to a concrete <node, port> destination */
		dport = tipc_nametbl_translate(type, inst, &dnode);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dport);
		if (unlikely(!dport && !dnode)) {
			rc = -EHOSTUNREACH;
			goto exit;
		}
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(mhdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(mhdr, 0);
		msg_set_destnode(mhdr, dnode);
		msg_set_destport(mhdr, dest->addr.id.ref);
		msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
	}

new_mtu:
	/* (Re)fragment user data to the MTU of the path towards dnode */
	mtu = tipc_node_get_mtu(dnode, tsk->portid);
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
	if (rc < 0)
		goto exit;

	do {
		skb = skb_peek(&head);
		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
		rc = tipc_link_xmit(&head, dnode, tsk->portid);
		if (likely(rc >= 0)) {
			/* A successfully sent 'SYN' advances the socket
			 * to the CONNECTING state
			 */
			if (sock->state != SS_READY)
				sock->state = SS_CONNECTING;
			rc = dsz;
			break;
		}
		if (rc == -EMSGSIZE)
			goto new_mtu;	/* MTU changed; rebuild the chain */
		if (rc != -ELINKCONG)
			break;
		/* Link congested: sleep until wakeup/timeout, dropping
		 * the unsent chain if the wait fails
		 */
		tsk->link_cong = 1;
		rc = tipc_wait_for_sndmsg(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
exit:
	if (iocb)
		release_sock(sk);

	return rc;
}
994
/* tipc_wait_for_sndpkt - wait until a connected socket may send again
 * @sock: socket structure
 * @timeo_p: remaining send timeout in jiffies; decremented while sleeping
 *
 * Sleeps until both link-level and connection-level congestion have
 * cleared, or until the connection drops, a socket error is pending,
 * a signal arrives, or the timeout expires.
 *
 * Called with the socket lock held; sk_wait_event() drops it while
 * sleeping and re-takes it before returning.
 *
 * Returns 0 when it is safe to retry the send, negative errno otherwise.
 */
static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		else if (sock->state != SS_CONNECTED)
			return -ENOTCONN;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		/* Wake when neither link nor peer is congested, or when
		 * the connection has gone away
		 */
		done = sk_wait_event(sk, timeo_p,
				     (!tsk->link_cong &&
				      !tsk_conn_cong(tsk)) ||
				      !tsk->connected);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
1024
/**
 * tipc_send_stream - send stream-oriented data
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *mhdr = &tsk->phdr;
	struct sk_buff_head head;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	u32 portid = tsk->portid;
	int rc = -EINVAL;
	long timeo;
	u32 dnode;
	uint mtu, send, sent = 0;

	/* Handle implied connection establishment */
	if (unlikely(dest)) {
		rc = tipc_sendmsg(iocb, sock, m, dsz);
		/* A fully sent implicit SYN also counts as one unacked msg */
		if (dsz && (dsz == rc))
			tsk->sent_unacked = 1;
		return rc;
	}
	if (dsz > (uint)INT_MAX)
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
			rc = -EPIPE;
		else
			rc = -ENOTCONN;
		goto exit;
	}

	timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	dnode = tsk_peer_node(tsk);

next:
	/* Build at most TIPC_MAX_USER_MSG_SIZE bytes per iteration */
	mtu = tsk->max_pkt;
	send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
	if (unlikely(rc < 0))
		goto exit;
	do {
		if (likely(!tsk_conn_cong(tsk))) {
			rc = tipc_link_xmit(&head, dnode, portid);
			if (likely(!rc)) {
				tsk->sent_unacked++;
				sent += send;
				if (sent == dsz)
					break;
				goto next;
			}
			/* Path MTU changed: refresh cached value, rebuild */
			if (rc == -EMSGSIZE) {
				tsk->max_pkt = tipc_node_get_mtu(dnode, portid);
				goto next;
			}
			if (rc != -ELINKCONG)
				break;
			tsk->link_cong = 1;
		}
		/* Congested: sleep, and drop the built chain on failure */
		rc = tipc_wait_for_sndpkt(sock, &timeo);
		if (rc)
			__skb_queue_purge(&head);
	} while (!rc);
exit:
	if (iocb)
		release_sock(sk);
	return sent ? sent : rc;
}
1109
1110 /**
1111 * tipc_send_packet - send a connection-oriented message
1112 * @iocb: if NULL, indicates that socket lock is already held
1113 * @sock: socket structure
1114 * @m: message to send
1115 * @dsz: length of data to be transmitted
1116 *
1117 * Used for SOCK_SEQPACKET messages.
1118 *
1119 * Returns the number of bytes sent on success, or errno otherwise
1120 */
1121 static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
1122 struct msghdr *m, size_t dsz)
1123 {
1124 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1125 return -EMSGSIZE;
1126
1127 return tipc_send_stream(iocb, sock, m, dsz);
1128 }
1129
/* tipc_sk_finish_conn - complete the setup of a connection
 * @tsk: socket being connected
 * @peer_port: port identity of the peer endpoint
 * @peer_node: node address of the peer endpoint
 *
 * Pre-sets the cached message header for subsequent connection-based
 * traffic, arms the connection probing timer, registers the connection
 * with the node layer and caches the path MTU.
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	tsk->probing_intv = CONN_PROBING_INTERVAL;
	tsk->probing_state = TIPC_CONN_OK;
	tsk->connected = 1;
	/* A pending timer holds a reference on the socket; take it only
	 * if the timer was not already pending
	 */
	if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv))
		sock_hold(&tsk->sk);
	tipc_node_add_conn(peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->portid);
}
1151
1152 /**
1153 * set_orig_addr - capture sender's address for received message
1154 * @m: descriptor for message info
1155 * @msg: received message header
1156 *
1157 * Note: Address is not captured if not requested by receiver.
1158 */
1159 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1160 {
1161 DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
1162
1163 if (addr) {
1164 addr->family = AF_TIPC;
1165 addr->addrtype = TIPC_ADDR_ID;
1166 memset(&addr->addr, 0, sizeof(addr->addr));
1167 addr->addr.id.ref = msg_origport(msg);
1168 addr->addr.id.node = msg_orignode(msg);
1169 addr->addr.name.domain = 0; /* could leave uninitialized */
1170 addr->scope = 0; /* could leave uninitialized */
1171 m->msg_namelen = sizeof(struct sockaddr_tipc);
1172 }
1173 }
1174
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	/* Receiver supplied no control buffer: nothing to capture */
	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			/* Hand the bounced payload back to the receiver */
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg); /* named msg: single instance */
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		/* Destination name only known if connect() used a name */
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
1245
/* tipc_sk_send_ack - send a CONN_ACK to the connected peer
 * @tsk: socket owning the connection
 * @ack: number of messages being acknowledged
 *
 * Silently does nothing if the socket is not connected or if the
 * ack message cannot be allocated (peer will simply see the ack later).
 */
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tsk->connected)
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
			      tipc_own_addr, peer_port, tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_msgcnt(msg, ack);
	tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
}
1263
/* tipc_wait_for_rcvmsg - wait until the receive queue is non-empty
 * @sock: socket structure
 * @timeop: in/out remaining receive timeout, in jiffies
 *
 * Called with the socket lock held; the lock is released while the
 * task sleeps and re-taken before the queue is re-checked.
 *
 * Returns 0 when a message is queued, otherwise -ENOTCONN (connection
 * went down), -EAGAIN (timeout) or a signal-derived errno.
 */
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sock->state == SS_DISCONNECTING) {
				err = -ENOTCONN;
				break;
			}
			/* Drop the lock while sleeping so messages can
			 * be queued by the receive path
			 */
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	/* Report back how much of the timeout remains */
	*timeop = timeo;
	return err;
}
1296
/**
 * tipc_recvmsg - receive packet-oriented message
 * @iocb: (unused)
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = tipc_sk_anc_data_recv(m, msg, tsk);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		/* Errored message: report 0 (peer shutdown, or caller asked
		 * for ancillary data) or connection reset
		 */
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		/* Connection-oriented sockets ack in batches */
		if ((sock->state != SS_READY) &&
		    (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
			tsk->rcv_unacked = 0;
		}
		tsk_advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
1391
/**
 * tipc_recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * A partially consumed buffer keeps its read offset in
 * TIPC_SKB_CB(buf)->handle so a later call can resume where it left off.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeo;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	/* target = minimum amount to return before stopping (SO_RCVLOWAT) */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	res = tipc_wait_for_rcvmsg(sock, &timeo);
	if (res)
		goto exit;

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		tsk_advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = tipc_sk_anc_data_recv(m, msg, tsk);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		/* Resume from any offset left by a previous partial read */
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
					    m, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			/* Buffer not fully consumed: record new offset
			 * unless we are only peeking
			 */
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
			tipc_sk_send_ack(tsk, tsk->rcv_unacked);
			tsk->rcv_unacked = 0;
		}
		tsk_advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
1510
1511 /**
1512 * tipc_write_space - wake up thread if port congestion is released
1513 * @sk: socket
1514 */
1515 static void tipc_write_space(struct sock *sk)
1516 {
1517 struct socket_wq *wq;
1518
1519 rcu_read_lock();
1520 wq = rcu_dereference(sk->sk_wq);
1521 if (wq_has_sleeper(wq))
1522 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
1523 POLLWRNORM | POLLWRBAND);
1524 rcu_read_unlock();
1525 }
1526
1527 /**
1528 * tipc_data_ready - wake up threads to indicate messages have been received
1529 * @sk: socket
1530 * @len: the length of messages
1531 */
1532 static void tipc_data_ready(struct sock *sk)
1533 {
1534 struct socket_wq *wq;
1535
1536 rcu_read_lock();
1537 wq = rcu_dereference(sk->sk_wq);
1538 if (wq_has_sleeper(wq))
1539 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
1540 POLLRDNORM | POLLRDBAND);
1541 rcu_read_unlock();
1542 }
1543
/**
 * filter_connect - Handle all incoming messages for a connection-based socket
 * @tsk: TIPC socket
 * @buf: pointer to the message buffer; may be set to NULL if the
 *       message is consumed here (e.g. an empty 'ACK-')
 *
 * Applies the socket's connection state machine to an arriving message.
 *
 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
 */
static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
{
	struct sock *sk = &tsk->sk;
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(*buf);
	int retval = -TIPC_ERR_NO_PORT;

	/* Multicast never targets a connection-based socket */
	if (msg_mcast(msg))
		return retval;

	switch ((int)sock->state) {
	case SS_CONNECTED:
		/* Accept only connection-based messages sent by peer */
		if (tsk_peer_msg(tsk, msg)) {
			if (unlikely(msg_errcode(msg))) {
				sock->state = SS_DISCONNECTING;
				tsk->connected = 0;
				/* let timer expire on it's own */
				tipc_node_remove_conn(tsk_peer_node(tsk),
						      tsk->portid);
			}
			retval = TIPC_OK;
		}
		break;
	case SS_CONNECTING:
		/* Accept only ACK or NACK message */

		if (unlikely(!msg_connected(msg)))
			break;

		/* NACK: connection refused by peer */
		if (unlikely(msg_errcode(msg))) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = ECONNREFUSED;
			retval = TIPC_OK;
			break;
		}

		/* Reject ACKs carrying an out-of-range importance level */
		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
			sock->state = SS_DISCONNECTING;
			sk->sk_err = EINVAL;
			retval = TIPC_OK;
			break;
		}

		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
		msg_set_importance(&tsk->phdr, msg_importance(msg));
		sock->state = SS_CONNECTED;

		/* If an incoming message is an 'ACK-', it should be
		 * discarded here because it doesn't contain useful
		 * data. In addition, we should try to wake up
		 * connect() routine if sleeping.
		 */
		if (msg_data_sz(msg) == 0) {
			kfree_skb(*buf);
			*buf = NULL;
			if (waitqueue_active(sk_sleep(sk)))
				wake_up_interruptible(sk_sleep(sk));
		}
		retval = TIPC_OK;
		break;
	case SS_LISTENING:
	case SS_UNCONNECTED:
		/* Accept only SYN message */
		if (!msg_connected(msg) && !(msg_errcode(msg)))
			retval = TIPC_OK;
		break;
	case SS_DISCONNECTING:
		break;
	default:
		pr_err("Unknown socket state %u\n", sock->state);
	}
	return retval;
}
1625
1626 /**
1627 * rcvbuf_limit - get proper overload limit of socket receive queue
1628 * @sk: socket
1629 * @buf: message
1630 *
1631 * For all connection oriented messages, irrespective of importance,
1632 * the default overload value (i.e. 67MB) is set as limit.
1633 *
1634 * For all connectionless messages, by default new queue limits are
1635 * as belows:
1636 *
1637 * TIPC_LOW_IMPORTANCE (4 MB)
1638 * TIPC_MEDIUM_IMPORTANCE (8 MB)
1639 * TIPC_HIGH_IMPORTANCE (16 MB)
1640 * TIPC_CRITICAL_IMPORTANCE (32 MB)
1641 *
1642 * Returns overload limit according to corresponding message importance
1643 */
1644 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1645 {
1646 struct tipc_msg *msg = buf_msg(buf);
1647
1648 if (msg_connected(msg))
1649 return sysctl_tipc_rmem[2];
1650
1651 return sk->sk_rcvbuf >> TIPC_CRITICAL_IMPORTANCE <<
1652 msg_importance(msg);
1653 }
1654
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message
 * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
 */
static int filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit = rcvbuf_limit(sk, buf);
	u32 onode;
	int rc = TIPC_OK;

	/* Connection-manager protocol messages never reach user space */
	if (unlikely(msg_user(msg) == CONN_MANAGER))
		return tipc_sk_proto_rcv(tsk, &onode, buf);

	/* Link congestion released: wake up blocked senders */
	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
		kfree_skb(buf);
		tsk->link_cong = 0;
		sk->sk_write_space(sk);
		return TIPC_OK;
	}

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return -TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		/* Connectionless socket cannot accept connection msgs */
		if (msg_connected(msg))
			return -TIPC_ERR_NO_PORT;
	} else {
		/* filter_connect() may consume the buffer (buf -> NULL) */
		rc = filter_connect(tsk, &buf);
		if (rc != TIPC_OK || buf == NULL)
			return rc;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
		return -TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(buf)->handle = NULL;
	__skb_queue_tail(&sk->sk_receive_queue, buf);
	skb_set_owner_r(buf, sk);

	sk->sk_data_ready(sk);
	return TIPC_OK;
}
1712
/**
 * tipc_backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @skb: message
 *
 * Caller must hold socket lock, but not port lock.
 *
 * Accepted messages are accounted against the duplicate-receive counter
 * used to extend the backlog limit; rejected ones are bounced back to
 * their origin when reversible.
 *
 * Returns 0
 */
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc;
	u32 onode;
	struct tipc_sock *tsk = tipc_sk(sk);
	/* filter_rcv() may free skb, so sample truesize first */
	uint truesize = skb->truesize;

	rc = filter_rcv(sk, skb);

	if (likely(!rc)) {
		if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
			atomic_add(truesize, &tsk->dupl_rcvcnt);
		return 0;
	}

	/* Message rejected: try to reverse it back to the sender */
	if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
		return 0;

	tipc_link_xmit_skb(skb, onode, 0);

	return 0;
}
1744
/**
 * tipc_sk_rcv - handle incoming message
 * @skb: buffer containing arriving message
 * Consumes buffer
 * Returns 0 if success, or errno: -EHOSTUNREACH
 */
int tipc_sk_rcv(struct sk_buff *skb)
{
	struct tipc_sock *tsk;
	struct sock *sk;
	u32 dport = msg_destport(buf_msg(skb));
	int rc = TIPC_OK;
	uint limit;
	u32 dnode;

	/* Validate destination and message */
	tsk = tipc_sk_lookup(dport);
	if (unlikely(!tsk)) {
		/* No such port: see if the message can be re-routed */
		rc = tipc_msg_eval(skb, &dnode);
		goto exit;
	}
	sk = &tsk->sk;

	/* Queue message */
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sock_owned_by_user(sk)) {
		rc = filter_rcv(sk, skb);
	} else {
		/* Owner busy: defer to backlog, with the limit extended
		 * by what has already been accepted this round
		 */
		if (sk->sk_backlog.len == 0)
			atomic_set(&tsk->dupl_rcvcnt, 0);
		limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
		if (sk_add_backlog(sk, skb, limit))
			rc = -TIPC_ERR_OVERLOAD;
	}
	spin_unlock_bh(&sk->sk_lock.slock);
	sock_put(sk);	/* release ref taken by tipc_sk_lookup() */
	if (likely(!rc))
		return 0;
exit:
	/* Rejected: bounce back to origin if the message is reversible */
	if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
		return -EHOSTUNREACH;

	tipc_link_xmit_skb(skb, dnode, 0);
	return (rc < 0) ? -EHOSTUNREACH : 0;
}
1791
/* tipc_wait_for_connect - wait until connection setup completes
 * @sock: socket structure
 * @timeo_p: remaining connect timeout, in jiffies
 *
 * Sleeps until the socket leaves SS_CONNECTING (either connected or
 * disconnected by an incoming ACK/NACK), or until an error, signal or
 * timeout intervenes.
 *
 * Returns 0 on success, otherwise -ETIMEDOUT, a pending socket error,
 * or a signal-derived errno.
 */
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, timeo_p, sock->state != SS_CONNECTING);
		finish_wait(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}
1813
/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tipc_sk(sk)->conn_timeout;
	socket_state previous;
	int res;

	lock_sock(sk);

	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
	if (sock->state == SS_READY) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	previous = sock->state;
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect is in non-blocking case, set MSG_DONTWAIT to
		 * indicate send_msg() is never blocked.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = tipc_sendmsg(NULL, sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case SS_CONNECTING:
		if (previous == SS_CONNECTING)
			res = -EALREADY;
		if (!timeout)
			goto exit;
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case SS_CONNECTED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
		break;
	}
exit:
	release_sock(sk);
	return res;
}
1894
1895 /**
1896 * tipc_listen - allow socket to listen for incoming connections
1897 * @sock: socket structure
1898 * @len: (unused)
1899 *
1900 * Returns 0 on success, errno otherwise
1901 */
1902 static int tipc_listen(struct socket *sock, int len)
1903 {
1904 struct sock *sk = sock->sk;
1905 int res;
1906
1907 lock_sock(sk);
1908
1909 if (sock->state != SS_UNCONNECTED)
1910 res = -EINVAL;
1911 else {
1912 sock->state = SS_LISTENING;
1913 res = 0;
1914 }
1915
1916 release_sock(sk);
1917 return res;
1918 }
1919
/* tipc_wait_for_accept - wait for an incoming connection request
 * @sock: listening socket
 * @timeo: maximum time to wait, in jiffies
 *
 * Returns 0 once a SYN is queued on the receive queue, otherwise
 * -EINVAL (socket left listening state), -EAGAIN (timeout) or a
 * signal-derived errno.  Called with the socket lock held; it is
 * dropped while the task sleeps.
 */
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EINVAL;
		if (sock->state != SS_LISTENING)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
1955
/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	/* Head of the receive queue is the pending SYN */
	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
	if (res)
		goto exit;

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to it's peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
	new_sock->state = SS_CONNECTED;

	/* Inherit connection attributes from the SYN */
	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		tipc_send_packet(NULL, new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}
2032
2033 /**
2034 * tipc_shutdown - shutdown socket connection
2035 * @sock: socket structure
2036 * @how: direction to close (must be SHUT_RDWR)
2037 *
2038 * Terminates connection (if necessary), then purges socket's receive queue.
2039 *
2040 * Returns 0 on success, errno otherwise
2041 */
2042 static int tipc_shutdown(struct socket *sock, int how)
2043 {
2044 struct sock *sk = sock->sk;
2045 struct tipc_sock *tsk = tipc_sk(sk);
2046 struct sk_buff *skb;
2047 u32 dnode;
2048 int res;
2049
2050 if (how != SHUT_RDWR)
2051 return -EINVAL;
2052
2053 lock_sock(sk);
2054
2055 switch (sock->state) {
2056 case SS_CONNECTING:
2057 case SS_CONNECTED:
2058
2059 restart:
2060 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
2061 skb = __skb_dequeue(&sk->sk_receive_queue);
2062 if (skb) {
2063 if (TIPC_SKB_CB(skb)->handle != NULL) {
2064 kfree_skb(skb);
2065 goto restart;
2066 }
2067 if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
2068 tipc_link_xmit_skb(skb, dnode, tsk->portid);
2069 tipc_node_remove_conn(dnode, tsk->portid);
2070 } else {
2071 dnode = tsk_peer_node(tsk);
2072 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2073 TIPC_CONN_MSG, SHORT_H_SIZE,
2074 0, dnode, tipc_own_addr,
2075 tsk_peer_port(tsk),
2076 tsk->portid, TIPC_CONN_SHUTDOWN);
2077 tipc_link_xmit_skb(skb, dnode, tsk->portid);
2078 }
2079 tsk->connected = 0;
2080 sock->state = SS_DISCONNECTING;
2081 tipc_node_remove_conn(dnode, tsk->portid);
2082 /* fall through */
2083
2084 case SS_DISCONNECTING:
2085
2086 /* Discard any unreceived messages */
2087 __skb_queue_purge(&sk->sk_receive_queue);
2088
2089 /* Wake up anyone sleeping in poll */
2090 sk->sk_state_change(sk);
2091 res = 0;
2092 break;
2093
2094 default:
2095 res = -ENOTCONN;
2096 }
2097
2098 release_sock(sk);
2099 return res;
2100 }
2101
/* tipc_sk_timeout - connection probing timer callback
 * @data: pointer to the owning tipc_sock, cast to unsigned long
 *
 * If the previous probe went unanswered, aborts the connection by
 * sending a TIPC_ERR_NO_PORT message to ourselves; otherwise sends a
 * CONN_PROBE to the peer and re-arms the timer.  Drops the socket
 * reference held by the expired timer on exit.
 */
static void tipc_sk_timeout(unsigned long data)
{
	struct tipc_sock *tsk = (struct tipc_sock *)data;
	struct sock *sk = &tsk->sk;
	struct sk_buff *skb = NULL;
	u32 peer_port, peer_node;

	bh_lock_sock(sk);
	if (!tsk->connected) {
		bh_unlock_sock(sk);
		goto exit;
	}
	peer_port = tsk_peer_port(tsk);
	peer_node = tsk_peer_node(tsk);

	if (tsk->probing_state == TIPC_CONN_PROBING) {
		/* Previous probe not answered -> self abort */
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr,
				      peer_node, tsk->portid, peer_port,
				      TIPC_ERR_NO_PORT);
	} else {
		skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
				      0, peer_node, tipc_own_addr,
				      peer_port, tsk->portid, TIPC_OK);
		tsk->probing_state = TIPC_CONN_PROBING;
		/* Re-armed timer keeps its own socket reference */
		if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	/* Transmit outside the bh lock; skb may be NULL on alloc failure */
	if (skb)
		tipc_link_xmit_skb(skb, peer_node, tsk->portid);
exit:
	sock_put(sk);
}
2137
/* tipc_sk_publish - bind a name sequence to this socket
 * @tsk: socket to publish on (must not be connected)
 * @scope: publication scope (zone/cluster/node)
 * @seq: name sequence to publish
 *
 * Returns 0 on success, -EINVAL if connected or the name table rejects
 * the publication, -EADDRINUSE if the generated key collides with the
 * port id.
 */
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct publication *publ;
	u32 key;

	if (tsk->connected)
		return -EINVAL;
	/* Derive a per-publication key; must never equal the port id */
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->pport_list, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}
2160
/* tipc_sk_withdraw - withdraw name publication(s) from this socket
 * @tsk: socket owning the publications
 * @scope: scope that a given @seq must match
 * @seq: specific name sequence to withdraw, or NULL to withdraw all
 *
 * With @seq, scanning stops at the first publication whose lower bound
 * matches; it is withdrawn only if the upper bound matches too.
 * NOTE(review): on an upper-bound mismatch the loop breaks instead of
 * continuing to later publications — verify this is the intended
 * matching semantics.
 *
 * Returns 0 if at least one publication was withdrawn, -EINVAL otherwise.
 */
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				break;
			tipc_nametbl_withdraw(publ->type, publ->lower,
					      publ->ref, publ->key);
			rc = 0;
			break;
		}
		/* No filter: withdraw every publication on the socket */
		tipc_nametbl_withdraw(publ->type, publ->lower,
				      publ->ref, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}
2191
2192 static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
2193 int len, int full_id)
2194 {
2195 struct publication *publ;
2196 int ret;
2197
2198 if (full_id)
2199 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
2200 tipc_zone(tipc_own_addr),
2201 tipc_cluster(tipc_own_addr),
2202 tipc_node(tipc_own_addr), tsk->portid);
2203 else
2204 ret = tipc_snprintf(buf, len, "%-10u:", tsk->portid);
2205
2206 if (tsk->connected) {
2207 u32 dport = tsk_peer_port(tsk);
2208 u32 destnode = tsk_peer_node(tsk);
2209
2210 ret += tipc_snprintf(buf + ret, len - ret,
2211 " connected to <%u.%u.%u:%u>",
2212 tipc_zone(destnode),
2213 tipc_cluster(destnode),
2214 tipc_node(destnode), dport);
2215 if (tsk->conn_type != 0)
2216 ret += tipc_snprintf(buf + ret, len - ret,
2217 " via {%u,%u}", tsk->conn_type,
2218 tsk->conn_instance);
2219 } else if (tsk->published) {
2220 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
2221 list_for_each_entry(publ, &tsk->publications, pport_list) {
2222 if (publ->lower == publ->upper)
2223 ret += tipc_snprintf(buf + ret, len - ret,
2224 " {%u,%u}", publ->type,
2225 publ->lower);
2226 else
2227 ret += tipc_snprintf(buf + ret, len - ret,
2228 " {%u,%u,%u}", publ->type,
2229 publ->lower, publ->upper);
2230 }
2231 }
2232 ret += tipc_snprintf(buf + ret, len - ret, "\n");
2233 return ret;
2234 }
2235
/* tipc_sk_socks_show - dump the state of all sockets into a TLV reply buffer
 *
 * Walks the socket hash table under RCU and appends one line per socket
 * (via tipc_sk_show()) into an ULTRA_STRING TLV.  Each socket's spinlock
 * is held while it is printed so its state cannot change mid-line.
 *
 * Returns the reply buffer, or NULL on allocation failure.
 */
struct sk_buff *tipc_sk_socks_show(void)
{
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	char *pb;
	int pb_len;
	struct tipc_sock *tsk;
	int str_len = 0;
	int i;

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;
	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tipc_sk_rht)->tbl, &tipc_sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			str_len += tipc_sk_show(tsk, pb + str_len,
						pb_len - str_len, 0);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
	rcu_read_unlock();

	str_len += 1; /* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
2273
2274 /* tipc_sk_reinit: set non-zero address in all existing sockets
2275 * when we go from standalone to network mode.
2276 */
2277 void tipc_sk_reinit(void)
2278 {
2279 const struct bucket_table *tbl;
2280 struct rhash_head *pos;
2281 struct tipc_sock *tsk;
2282 struct tipc_msg *msg;
2283 int i;
2284
2285 rcu_read_lock();
2286 tbl = rht_dereference_rcu((&tipc_sk_rht)->tbl, &tipc_sk_rht);
2287 for (i = 0; i < tbl->size; i++) {
2288 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2289 spin_lock_bh(&tsk->sk.sk_lock.slock);
2290 msg = &tsk->phdr;
2291 msg_set_prevnode(msg, tipc_own_addr);
2292 msg_set_orignode(msg, tipc_own_addr);
2293 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2294 }
2295 }
2296 rcu_read_unlock();
2297 }
2298
2299 static struct tipc_sock *tipc_sk_lookup(u32 portid)
2300 {
2301 struct tipc_sock *tsk;
2302
2303 rcu_read_lock();
2304 tsk = rhashtable_lookup(&tipc_sk_rht, &portid);
2305 if (tsk)
2306 sock_hold(&tsk->sk);
2307 rcu_read_unlock();
2308
2309 return tsk;
2310 }
2311
2312 static int tipc_sk_insert(struct tipc_sock *tsk)
2313 {
2314 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2315 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2316
2317 while (remaining--) {
2318 portid++;
2319 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2320 portid = TIPC_MIN_PORT;
2321 tsk->portid = portid;
2322 sock_hold(&tsk->sk);
2323 if (rhashtable_lookup_insert(&tipc_sk_rht, &tsk->node))
2324 return 0;
2325 sock_put(&tsk->sk);
2326 }
2327
2328 return -1;
2329 }
2330
2331 static void tipc_sk_remove(struct tipc_sock *tsk)
2332 {
2333 struct sock *sk = &tsk->sk;
2334
2335 if (rhashtable_remove(&tipc_sk_rht, &tsk->node)) {
2336 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2337 __sock_put(sk);
2338 }
2339 }
2340
2341 int tipc_sk_rht_init(void)
2342 {
2343 struct rhashtable_params rht_params = {
2344 .nelem_hint = 192,
2345 .head_offset = offsetof(struct tipc_sock, node),
2346 .key_offset = offsetof(struct tipc_sock, portid),
2347 .key_len = sizeof(u32), /* portid */
2348 .hashfn = jhash,
2349 .max_shift = 20, /* 1M */
2350 .min_shift = 8, /* 256 */
2351 .grow_decision = rht_grow_above_75,
2352 .shrink_decision = rht_shrink_below_30,
2353 };
2354
2355 return rhashtable_init(&tipc_sk_rht, &rht_params);
2356 }
2357
/* tipc_sk_rht_destroy - tear down the portid -> socket hash table */
void tipc_sk_rht_destroy(void)
{
	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tipc_sk_rht);
}
2365
2366 /**
2367 * tipc_setsockopt - set socket option
2368 * @sock: socket structure
2369 * @lvl: option level
2370 * @opt: option identifier
2371 * @ov: pointer to new option value
2372 * @ol: length of option value
2373 *
2374 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2375 * (to ease compatibility).
2376 *
2377 * Returns 0 on success, errno otherwise
2378 */
2379 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2380 char __user *ov, unsigned int ol)
2381 {
2382 struct sock *sk = sock->sk;
2383 struct tipc_sock *tsk = tipc_sk(sk);
2384 u32 value;
2385 int res;
2386
2387 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2388 return 0;
2389 if (lvl != SOL_TIPC)
2390 return -ENOPROTOOPT;
2391 if (ol < sizeof(value))
2392 return -EINVAL;
2393 res = get_user(value, (u32 __user *)ov);
2394 if (res)
2395 return res;
2396
2397 lock_sock(sk);
2398
2399 switch (opt) {
2400 case TIPC_IMPORTANCE:
2401 res = tsk_set_importance(tsk, value);
2402 break;
2403 case TIPC_SRC_DROPPABLE:
2404 if (sock->type != SOCK_STREAM)
2405 tsk_set_unreliable(tsk, value);
2406 else
2407 res = -ENOPROTOOPT;
2408 break;
2409 case TIPC_DEST_DROPPABLE:
2410 tsk_set_unreturnable(tsk, value);
2411 break;
2412 case TIPC_CONN_TIMEOUT:
2413 tipc_sk(sk)->conn_timeout = value;
2414 /* no need to set "res", since already 0 at this point */
2415 break;
2416 default:
2417 res = -EINVAL;
2418 }
2419
2420 release_sock(sk);
2421
2422 return res;
2423 }
2424
2425 /**
2426 * tipc_getsockopt - get socket option
2427 * @sock: socket structure
2428 * @lvl: option level
2429 * @opt: option identifier
2430 * @ov: receptacle for option value
2431 * @ol: receptacle for length of option value
2432 *
2433 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
2434 * (to ease compatibility).
2435 *
2436 * Returns 0 on success, errno otherwise
2437 */
2438 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2439 char __user *ov, int __user *ol)
2440 {
2441 struct sock *sk = sock->sk;
2442 struct tipc_sock *tsk = tipc_sk(sk);
2443 int len;
2444 u32 value;
2445 int res;
2446
2447 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2448 return put_user(0, ol);
2449 if (lvl != SOL_TIPC)
2450 return -ENOPROTOOPT;
2451 res = get_user(len, ol);
2452 if (res)
2453 return res;
2454
2455 lock_sock(sk);
2456
2457 switch (opt) {
2458 case TIPC_IMPORTANCE:
2459 value = tsk_importance(tsk);
2460 break;
2461 case TIPC_SRC_DROPPABLE:
2462 value = tsk_unreliable(tsk);
2463 break;
2464 case TIPC_DEST_DROPPABLE:
2465 value = tsk_unreturnable(tsk);
2466 break;
2467 case TIPC_CONN_TIMEOUT:
2468 value = tsk->conn_timeout;
2469 /* no need to set "res", since already 0 at this point */
2470 break;
2471 case TIPC_NODE_RECVQ_DEPTH:
2472 value = 0; /* was tipc_queue_size, now obsolete */
2473 break;
2474 case TIPC_SOCK_RECVQ_DEPTH:
2475 value = skb_queue_len(&sk->sk_receive_queue);
2476 break;
2477 default:
2478 res = -EINVAL;
2479 }
2480
2481 release_sock(sk);
2482
2483 if (res)
2484 return res; /* "get" failed */
2485
2486 if (len < sizeof(value))
2487 return -EINVAL;
2488
2489 if (copy_to_user(ov, &value, sizeof(value)))
2490 return -EFAULT;
2491
2492 return put_user(sizeof(value), ol);
2493 }
2494
2495 static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
2496 {
2497 struct tipc_sioc_ln_req lnr;
2498 void __user *argp = (void __user *)arg;
2499
2500 switch (cmd) {
2501 case SIOCGETLINKNAME:
2502 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2503 return -EFAULT;
2504 if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
2505 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2506 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2507 return -EFAULT;
2508 return 0;
2509 }
2510 return -EADDRNOTAVAIL;
2511 default:
2512 return -ENOIOCTLCMD;
2513 }
2514 }
2515
/* Protocol switches for the various types of TIPC sockets */

/* SOCK_RDM / SOCK_DGRAM: connectionless messaging, no accept/listen */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* SOCK_SEQPACKET: connection-oriented, message-boundary preserving */
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* SOCK_STREAM: connection-oriented byte stream */
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll		= tipc_poll,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_stream,
	.recvmsg	= tipc_recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

/* Registration record for the AF_TIPC address family */
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

/* Proto used for sockets created from user space */
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/* Proto used for kernel-internal sockets (no module owner) */
static struct proto tipc_proto_kern = {
	.name		= "TIPC",
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};
2599
2600 /**
2601 * tipc_socket_init - initialize TIPC socket interface
2602 *
2603 * Returns 0 on success, errno otherwise
2604 */
2605 int tipc_socket_init(void)
2606 {
2607 int res;
2608
2609 res = proto_register(&tipc_proto, 1);
2610 if (res) {
2611 pr_err("Failed to register TIPC protocol type\n");
2612 goto out;
2613 }
2614
2615 res = sock_register(&tipc_family_ops);
2616 if (res) {
2617 pr_err("Failed to register TIPC socket type\n");
2618 proto_unregister(&tipc_proto);
2619 goto out;
2620 }
2621 out:
2622 return res;
2623 }
2624
2625 /**
2626 * tipc_socket_stop - stop TIPC socket interface
2627 */
2628 void tipc_socket_stop(void)
2629 {
2630 sock_unregister(tipc_family_ops.family);
2631 proto_unregister(&tipc_proto);
2632 }
2633
2634 /* Caller should hold socket lock for the passed tipc socket. */
2635 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2636 {
2637 u32 peer_node;
2638 u32 peer_port;
2639 struct nlattr *nest;
2640
2641 peer_node = tsk_peer_node(tsk);
2642 peer_port = tsk_peer_port(tsk);
2643
2644 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2645
2646 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2647 goto msg_full;
2648 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2649 goto msg_full;
2650
2651 if (tsk->conn_type != 0) {
2652 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2653 goto msg_full;
2654 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2655 goto msg_full;
2656 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2657 goto msg_full;
2658 }
2659 nla_nest_end(skb, nest);
2660
2661 return 0;
2662
2663 msg_full:
2664 nla_nest_cancel(skb, nest);
2665
2666 return -EMSGSIZE;
2667 }
2668
/* __tipc_nl_add_sk - append one socket entry to a netlink dump message
 *
 * Emits a TIPC_NL_SOCK_GET multipart message containing the socket's port
 * id and node address, plus either its connection attributes (when
 * connected) or a HAS_PUBL flag (when it has publications).
 *
 * Caller should hold socket lock for the passed tipc socket.
 *
 * Returns 0 on success, -EMSGSIZE if the message buffer is exhausted;
 * on failure everything added here is cancelled from @skb.
 */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	int err;
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
		goto attr_msg_cancel;

	if (tsk->connected) {
		err = __tipc_nl_add_sk_con(skb, tsk);
		if (err)
			goto attr_msg_cancel;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			goto attr_msg_cancel;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
2710
/* tipc_nl_sk_dump - netlink dump callback listing all TIPC sockets
 *
 * Walks the socket hash table under RCU, emitting one entry per socket.
 * cb->args[0] carries the port id of the last fully-dumped socket so the
 * dump can resume from there when the skb fills up and the core calls
 * back for the next batch.
 */
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct tipc_sock *tsk;
	const struct bucket_table *tbl;
	struct rhash_head *pos;
	u32 prev_portid = cb->args[0];
	u32 portid = prev_portid;
	int i;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tipc_sk_rht)->tbl, &tipc_sk_rht);
	for (i = 0; i < tbl->size; i++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			portid = tsk->portid;
			err = __tipc_nl_add_sk(skb, cb, tsk);
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
			if (err)
				/* skb full: resume after prev_portid next time */
				break;

			prev_portid = portid;
		}
	}
	rcu_read_unlock();

	cb->args[0] = prev_portid;

	return skb->len;
}
2741
/* __tipc_nl_add_sk_publ - append one publication to a netlink dump message
 *
 * Emits a TIPC_NL_PUBL_GET multipart message carrying the publication's
 * key, type, lower and upper values in a TIPC_NLA_PUBL nest.
 *
 * Caller should hold socket lock for the passed tipc socket.
 *
 * Returns 0 on success, -EMSGSIZE if the message buffer is exhausted;
 * on failure everything added here is cancelled from @skb.
 */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}
2780
/* __tipc_nl_list_sk_publ - dump a socket's publications, resumable
 * @skb: dump message under construction
 * @cb: netlink dump state
 * @tsk: socket whose publications are listed
 * @last_publ: in/out resume cursor; 0 means "start from the beginning",
 *             otherwise the key of the publication to resume after
 *
 * Caller should hold socket lock for the passed tipc socket.
 *
 * Returns 0 when all publications were emitted (cursor reset to 0),
 * -EMSGSIZE with the cursor set when the skb filled up, or -EPIPE when
 * the resume key no longer exists (list changed between dump batches).
 */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		/* Find the publication we stopped at in the previous batch */
		list_for_each_entry(p, &tsk->publications, pport_list) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     pport_list);
	}

	list_for_each_entry_from(p, &tsk->publications, pport_list) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			/* skb full: remember where to resume next batch */
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}
2821
/* tipc_nl_publ_dump - netlink dump callback for one socket's publications
 *
 * First invocation parses the request to find the target socket's port id
 * (TIPC_NLA_SOCK_REF); dump state is carried across batches in cb->args:
 * args[0] = target port id, args[1] = publication resume key,
 * args[2] = done flag.
 */
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		/* NOTE(review): attrs[TIPC_NLA_SOCK] is passed to
		 * nla_parse_nested() without a NULL check - confirm the
		 * attribute is guaranteed present by the request policy.
		 */
		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);	/* drop the lookup reference */

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}
This page took 0.086901 seconds and 6 git commands to generate.