tipc: let broadcast transmission use new link transmit function
[deliverable/linux.git] / net / tipc / bcast.c
CommitLineData
b97bf3fd
PL
1/*
2 * net/tipc/bcast.c: TIPC broadcast code
c4307285 3 *
3c724acd 4 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
b97bf3fd 5 * Copyright (c) 2004, Intel Corporation.
2d627b92 6 * Copyright (c) 2005, 2010-2011, Wind River Systems
b97bf3fd
PL
7 * All rights reserved.
8 *
9ea1fd3c 9 * Redistribution and use in source and binary forms, with or without
b97bf3fd
PL
10 * modification, are permitted provided that the following conditions are met:
11 *
9ea1fd3c
PL
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
b97bf3fd 20 *
9ea1fd3c
PL
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
b97bf3fd
PL
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
6beb19a6 38#include <linux/tipc_config.h>
078bec82
JPM
39#include "socket.h"
40#include "msg.h"
b97bf3fd 41#include "bcast.h"
9f6bdcd4 42#include "name_distr.h"
6beb19a6
JPM
43#include "link.h"
44#include "node.h"
b97bf3fd 45
/* Broadcast link tunables.  The MTU is fixed for the broadcast pseudo-link;
 * the window values bound the link send window (see tipc_link).
 */
#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define BCLINK_WIN_MIN		32	/* bcast minimum link window size */

/* Name reported for the broadcast link, e.g. in netlink link dumps */
const char tipc_bclink_name[] = "broadcast-link";
b97bf3fd 51
6beb19a6
JPM
/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};
65
66#define BCBEARER MAX_BEARERS
67
/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bcast_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};
90
/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @arrvq: queue of arriving broadcast messages (filled by tipc_bclink_rcv(),
 *         drained together with @inputq by tipc_sk_mcast_rcv())
 * @inputq: queue of messages ready for delivery to local sockets
 * @namedq: NOTE(review): not referenced in this file — presumably used for
 *          name-distribution traffic; confirm against name_distr.c
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct sk_buff_head namedq;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};
109
5fd9fd63
JPM
/* tipc_bc_base - fetch this namespace's broadcast-link base structure */
static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}
114
2f566124
JPM
/* tipc_bc_sndlink - fetch this namespace's broadcast send link */
static struct tipc_link *tipc_bc_sndlink(struct net *net)
{
	return tipc_net(net)->bcl;
}
119
6beb19a6
JPM
/**
 * tipc_nmap_equal - test for equality of node maps
 *
 * Byte-wise comparison; valid because node maps are plain data with no
 * padding-sensitive content distinguishing otherwise-equal maps.
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}
128
/* Forward declarations for helpers defined later in this file */
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq);
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
/* tipc_bclink_lock - take the per-namespace broadcast lock */
static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}
139
/* tipc_bclink_unlock - release the per-namespace broadcast lock */
static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}
144
cb1b7280
JPM
/* tipc_bclink_input - deliver queued broadcast messages to local sockets
 *
 * Moves messages from the arrival queue through the input queue into the
 * socket layer via tipc_sk_mcast_rcv().
 */
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
}
151
/* tipc_bcast_get_mtu - return the (fixed) broadcast link MTU */
uint tipc_bcast_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}
156
/* bcbuf_acks - number of peers that still must ack this broadcast buffer */
static u16 bcbuf_acks(struct sk_buff *skb)
{
	return TIPC_SKB_CB(skb)->ackers;
}
161
/* bcbuf_set_acks - set the outstanding-acker count for a broadcast buffer */
static void bcbuf_set_acks(struct sk_buff *buf, u16 ackers)
{
	TIPC_SKB_CB(buf)->ackers = ackers;
}
166
/* bcbuf_decr_acks - record one more peer acknowledgement for this buffer */
static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
171
/* tipc_bclink_add_node - add a node to the broadcast destination set
 * @net: the applicable net namespace
 * @addr: TIPC address of the joining node
 *
 * Registers the node in the bcast node map and bumps the send link's
 * peer count under the broadcast lock.
 */
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l = tipc_bc_sndlink(net);
	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
	tipc_link_add_bc_peer(l);
	tipc_bclink_unlock(net);
}
181
/* tipc_bclink_remove_node - remove a node from the broadcast destination set
 * @net: the applicable net namespace
 * @addr: TIPC address of the departing node
 *
 * Drops the node from the bcast node map and decrements the acker count.
 * When the last node leaves, the backlog queue is purged since no peer
 * remains to receive the queued traffic.
 */
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);
	tn->bcl->ackers--;

	/* Last node? => reset backlog queue */
	if (!tn->bcbase->bcast_nodes.count)
		tipc_link_purge_backlog(tn->bcbase->link);

	tipc_bclink_unlock(net);
}
b97bf3fd 196
/* bclink_set_last_sent - record the last broadcast sequence number sent
 *
 * silent_intv_cnt is (re)used here to hold snd_nxt - 1, i.e. the sequence
 * number of the most recently sent broadcast packet.
 */
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}
204
/* tipc_bclink_get_last_sent - sequence number of last sent broadcast packet
 *
 * Reads the value cached by bclink_set_last_sent().
 */
u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}
211
7a54d4a9 212static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
b97bf3fd 213{
7a54d4a9
AS
214 node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
215 seqno : node->bclink.last_sent;
b97bf3fd
PL
216}
217
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}
229
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 *
 * Walks the transmit queue to the first packet beyond @after and hands
 * the chain (mod(to - after) packets) to tipc_link_retransmit().
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}
249
7845989c
KD
/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};	/* pending per importance */
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		/* Skip users whose chain would overflow this importance level */
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}
273
908344cd
JM
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}
289
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 *
 * Releases transmit-queue buffers once every peer has acknowledged them,
 * then tries to push any backlogged packets and flags a wakeup of blocked
 * senders if buffers were freed.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;
	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}
	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}
	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		/* Free the buffer once all peers have acknowledged it */
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}
363
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 *
 * Tracks how far the peer claims to have sent (@last_sent) versus what we
 * have received, and sends a NACK over the broadcast bearer when packet
 * loss is confirmed.  oos_state parity throttles NACKs: one is sent only
 * on even values (see the & 0x1 test), and bclink_peek_nack() can defer
 * ours by bumping the state when another node already NACKed.
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	/* This is a good location for statistical profiling */
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += skb_queue_len(&bcl->transmq);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		/* NACK gap ends just before the first deferred packet, if any */
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}
426
d999297c
JPM
/* tipc_bclink_sync_state - synchronize broadcast link state from a peer
 * @n: peer node
 * @hdr: received LINK_PROTOCOL message header
 *
 * STATE messages update the peer's last-sent counter; RESET/ACTIVATE
 * messages (re)initialize broadcast sync for nodes that are not yet up.
 */
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}
449
/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 *
 * Setting oos_state to 2 (even) suppresses an immediate NACK in
 * tipc_bclink_update_link_state()'s parity check.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}
471
/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 * and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq, inputq, rcvq;
	int rc = 0;

	__skb_queue_head_init(&rcvq);
	__skb_queue_head_init(&xmitq);
	skb_queue_head_init(&inputq);	/* lock-initialized: shared with rcv path */

	/* Prepare message clone for local node */
	if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
		return -EHOSTUNREACH;

	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, list, &xmitq);
	bclink_set_last_sent(net);
	tipc_bcast_unlock(net);

	/* Don't send to local node if adding to link failed */
	if (unlikely(rc)) {
		__skb_queue_purge(&rcvq);
		return rc;
	}
	/* Broadcast to all nodes, including local node */
	tipc_bcbearer_xmit(net, &xmitq);
	tipc_sk_mcast_rcv(net, &rcvq, &inputq);
	__skb_queue_purge(list);
	return 0;
}
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 * (the node's own address offsets the ACK phase)
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}
534
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 *
 * Dispatches on message class: BCAST_PROTOCOL STATE messages carry
 * acks/NACKs; data messages are delivered in sequence (with bundling and
 * fragment reassembly handled inline), out-of-sequence packets are placed
 * on the per-node deferred queue.  Note the careful lock choreography:
 * the node lock is held across the whole function except while calling
 * bclink_peek_nack(), and bclink_lock is nested inside it.
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;
	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			/* NACK addressed to us: ack + retransmit the gap */
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			/* NACK for someone else: maybe defer our own NACK */
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}
	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			/* Unpack each bundled message onto the arrival queue */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			/* Both NULL => reassembly error, drop */
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				/* Reassembly complete: deliver rebuilt message */
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			/* Other users (e.g. name distr): accept and drop buffer */
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}
689
6c00055a 690u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
b97bf3fd 691{
389dd9bc 692 return (n_ptr->bclink.recv_permitted &&
1da46568 693 (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
b97bf3fd
PL
694}
695
696
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		/* Sending with zero destinations indicates a logic error */
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		/* Link selector load-balances between the pair's two bearers */
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
771
2f566124
JPM
/* tipc_bcbearer_xmit - drain a transmit queue through the broadcast bearer
 * @net: the applicable net namespace
 * @xmitq: queue of buffers to send; emptied on return
 */
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(xmitq, skb, tmp) {
		/* Walking from the head, so the dequeued buffer is @skb */
		__skb_dequeue(xmitq);
		tipc_bcbearer_send(net, skb, NULL, NULL);

		/* Until we remove cloning in tipc_l2_send_msg(): */
		kfree_skb(skb);
	}
}
784
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 *
 * Also applies the node-map update (@action true => add @node, false =>
 * remove) before regrouping bearers, all under bclink_lock.
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			/* Pair the bearers only when they reach the same nodes;
			 * otherwise the secondary becomes its own entry
			 */
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}
848
d8182804
RA
/* __tipc_nl_add_bc_link_stat - append broadcast link statistics attributes
 * @skb: netlink message under construction
 * @stats: link statistics to report
 *
 * Returns 0 on success or -EMSGSIZE when the message buffer is exhausted
 * (the partially written nest is cancelled in that case).
 */
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		/* Guard against division by zero when no samples were taken */
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
899
1da46568 900int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
7be57fc6
RA
901{
902 int err;
903 void *hdr;
904 struct nlattr *attrs;
905 struct nlattr *prop;
1da46568
YX
906 struct tipc_net *tn = net_generic(net, tipc_net_id);
907 struct tipc_link *bcl = tn->bcl;
7be57fc6
RA
908
909 if (!bcl)
910 return 0;
911
1da46568 912 tipc_bclink_lock(net);
7be57fc6 913
bfb3e5dd 914 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
7be57fc6
RA
915 NLM_F_MULTI, TIPC_NL_LINK_GET);
916 if (!hdr)
917 return -EMSGSIZE;
918
919 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
920 if (!attrs)
921 goto msg_full;
922
923 /* The broadcast link is always up */
924 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
925 goto attr_msg_full;
926
927 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
928 goto attr_msg_full;
929 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
930 goto attr_msg_full;
a97b9d3f 931 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
7be57fc6 932 goto attr_msg_full;
a97b9d3f 933 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
7be57fc6
RA
934 goto attr_msg_full;
935
936 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
937 if (!prop)
938 goto attr_msg_full;
1f66d161 939 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
7be57fc6
RA
940 goto prop_msg_full;
941 nla_nest_end(msg->skb, prop);
942
943 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
944 if (err)
945 goto attr_msg_full;
946
1da46568 947 tipc_bclink_unlock(net);
7be57fc6
RA
948 nla_nest_end(msg->skb, attrs);
949 genlmsg_end(msg->skb, hdr);
950
951 return 0;
952
953prop_msg_full:
954 nla_nest_cancel(msg->skb, prop);
955attr_msg_full:
956 nla_nest_cancel(msg->skb, attrs);
957msg_full:
1da46568 958 tipc_bclink_unlock(net);
7be57fc6
RA
959 genlmsg_cancel(msg->skb, hdr);
960
961 return -EMSGSIZE;
962}
b97bf3fd 963
1da46568 964int tipc_bclink_reset_stats(struct net *net)
b97bf3fd 965{
1da46568
YX
966 struct tipc_net *tn = net_generic(net, tipc_net_id);
967 struct tipc_link *bcl = tn->bcl;
968
b97bf3fd
PL
969 if (!bcl)
970 return -ENOPROTOOPT;
971
1da46568 972 tipc_bclink_lock(net);
b97bf3fd 973 memset(&bcl->stats, 0, sizeof(bcl->stats));
1da46568 974 tipc_bclink_unlock(net);
0e35fd5e 975 return 0;
b97bf3fd
PL
976}
977
1da46568 978int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
b97bf3fd 979{
1da46568
YX
980 struct tipc_net *tn = net_generic(net, tipc_net_id);
981 struct tipc_link *bcl = tn->bcl;
982
b97bf3fd
PL
983 if (!bcl)
984 return -ENOPROTOOPT;
53387c4e
JPM
985 if (limit < BCLINK_WIN_MIN)
986 limit = BCLINK_WIN_MIN;
987 if (limit > TIPC_MAX_LINK_WIN)
b97bf3fd 988 return -EINVAL;
1da46568 989 tipc_bclink_lock(net);
4323add6 990 tipc_link_set_queue_limits(bcl, limit);
1da46568 991 tipc_bclink_unlock(net);
0e35fd5e 992 return 0;
b97bf3fd
PL
993}
994
670f4f88
RA
995int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
996{
997 int err;
998 u32 win;
999 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1000
1001 if (!attrs[TIPC_NLA_LINK_PROP])
1002 return -EINVAL;
1003
1004 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
1005 if (err)
1006 return err;
1007
1008 if (!props[TIPC_NLA_PROP_WIN])
1009 return -EOPNOTSUPP;
1010
1011 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1012
1013 return tipc_bclink_set_queue_limits(net, win);
1014}
1015
6beb19a6 1016int tipc_bcast_init(struct net *net)
b97bf3fd 1017{
32301906
JPM
1018 struct tipc_net *tn = tipc_net(net);
1019 struct tipc_bcbearer *bcb = NULL;
1020 struct tipc_bc_base *bb = NULL;
1021 struct tipc_link *l = NULL;
1022
1023 bcb = kzalloc(sizeof(*bcb), GFP_ATOMIC);
1024 if (!bcb)
1025 goto enomem;
1026 tn->bcbearer = bcb;
1027
1028 bcb->bearer.window = BCLINK_WIN_DEFAULT;
1029 bcb->bearer.mtu = MAX_PKT_DEFAULT_MCAST;
1030 bcb->bearer.identity = MAX_BEARERS;
1031
1032 bcb->bearer.media = &bcb->media;
1033 bcb->media.send_msg = tipc_bcbearer_send;
1034 sprintf(bcb->media.name, "tipc-broadcast");
1035 strcpy(bcb->bearer.name, bcb->media.name);
1036
1037 bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
1038 if (!bb)
1039 goto enomem;
1040 tn->bcbase = bb;
1041 __skb_queue_head_init(&bb->arrvq);
0043550b 1042 spin_lock_init(&tipc_net(net)->bclock);
32301906
JPM
1043 bb->node.net = net;
1044
1045 if (!tipc_link_bc_create(&bb->node,
1046 MAX_PKT_DEFAULT_MCAST,
1047 BCLINK_WIN_DEFAULT,
1048 &bb->inputq,
1049 &bb->namedq,
1050 &l))
1051 goto enomem;
1052 bb->link = l;
1053 tn->bcl = l;
1054 rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcb->bearer);
eb8b00f5 1055 return 0;
32301906
JPM
1056enomem:
1057 kfree(bcb);
1058 kfree(bb);
1059 kfree(l);
1060 return -ENOMEM;
b97bf3fd
PL
1061}
1062
5fd9fd63
JPM
1063void tipc_bcast_reinit(struct net *net)
1064{
1065 struct tipc_bc_base *b = tipc_bc_base(net);
1066
32301906 1067 msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
5fd9fd63
JPM
1068}
1069
6beb19a6 1070void tipc_bcast_stop(struct net *net)
b97bf3fd 1071{
7f9f95d9
YX
1072 struct tipc_net *tn = net_generic(net, tipc_net_id);
1073
1da46568
YX
1074 tipc_bclink_lock(net);
1075 tipc_link_purge_queues(tn->bcl);
1076 tipc_bclink_unlock(net);
7f9f95d9 1077 RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
eb8b00f5 1078 synchronize_net();
1da46568 1079 kfree(tn->bcbearer);
6beb19a6 1080 kfree(tn->bcbase);
32301906 1081 kfree(tn->bcl);
b97bf3fd
PL
1082}
1083
3e22e62b
AS
1084/**
1085 * tipc_nmap_add - add a node to a node map
1086 */
28dd9418 1087static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
3e22e62b
AS
1088{
1089 int n = tipc_node(node);
1090 int w = n / WSIZE;
1091 u32 mask = (1 << (n % WSIZE));
1092
1093 if ((nm_ptr->map[w] & mask) == 0) {
1094 nm_ptr->count++;
1095 nm_ptr->map[w] |= mask;
1096 }
1097}
1098
1099/**
1100 * tipc_nmap_remove - remove a node from a node map
1101 */
28dd9418 1102static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
3e22e62b
AS
1103{
1104 int n = tipc_node(node);
1105 int w = n / WSIZE;
1106 u32 mask = (1 << (n % WSIZE));
1107
1108 if ((nm_ptr->map[w] & mask) != 0) {
1109 nm_ptr->map[w] &= ~mask;
1110 nm_ptr->count--;
1111 }
1112}
1113
1114/**
1115 * tipc_nmap_diff - find differences between node maps
1116 * @nm_a: input node map A
1117 * @nm_b: input node map B
1118 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
1119 */
31e3c3f6 1120static void tipc_nmap_diff(struct tipc_node_map *nm_a,
1121 struct tipc_node_map *nm_b,
1122 struct tipc_node_map *nm_diff)
3e22e62b
AS
1123{
1124 int stop = ARRAY_SIZE(nm_a->map);
1125 int w;
1126 int b;
1127 u32 map;
1128
1129 memset(nm_diff, 0, sizeof(*nm_diff));
1130 for (w = 0; w < stop; w++) {
1131 map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
1132 nm_diff->map[w] = map;
1133 if (map != 0) {
1134 for (b = 0 ; b < WSIZE; b++) {
1135 if (map & (1 << b))
1136 nm_diff->count++;
1137 }
1138 }
1139 }
1140}
This page took 0.805615 seconds and 5 git commands to generate.