/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_bclink_lock(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        spin_lock_bh(&tn->bclink->lock);
}
static void tipc_bclink_unlock(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *node = NULL;

        if (likely(!tn->bclink->flags)) {
                spin_unlock_bh(&tn->bclink->lock);
                return;
        }

        if (tn->bclink->flags & TIPC_BCLINK_RESET) {
                tn->bclink->flags &= ~TIPC_BCLINK_RESET;
                node = tipc_bclink_retransmit_to(net);
        }
        spin_unlock_bh(&tn->bclink->lock);

        if (node)
                tipc_link_reset_all(node);
}
uint tipc_bclink_get_mtu(void)
{
        return MAX_PKT_DEFAULT_MCAST;
}
void tipc_bclink_set_flags(struct net *net, unsigned int flags)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tn->bclink->flags |= flags;
}
static u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
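
/* The helpers above overload the skb control block's 'handle' field to
 * count how many cluster nodes still have to acknowledge a buffer on the
 * broadcast transmit queue: the count is seeded from the broadcast node
 * map when the buffer is first sent (see tipc_bcbearer_send()), decremented
 * once per acking node, and the buffer can be released when it reaches
 * zero (see tipc_bclink_acknowledge()).
 */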
void tipc_bclink_add_node(struct net *net, u32 addr)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tipc_bclink_lock(net);
        tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
        tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tipc_bclink_lock(net);
        tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
        tipc_bclink_unlock(net);
}
static void bclink_set_last_sent(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;

        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}
u32 tipc_bclink_get_last_sent(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        return tn->bcl->fsm_msg_cnt;
}
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
        node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
                                 seqno : node->bclink.last_sent;
}
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        return tn->bclink->retransmit_to;
}
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
        struct sk_buff *skb;
        struct tipc_link *bcl = tn->bcl;

        skb_queue_walk(&bcl->outqueue, skb) {
                if (more(buf_seqno(skb), after)) {
                        tipc_link_retransmit(bcl, skb, mod(to - after));
                        break;
                }
        }
}
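
/* Example of the (after, to] window handled above: a NACK carrying
 * after = 100 and to = 103 makes the walk stop at the first queued buffer
 * whose sequence number exceeds 100 and asks the link code to retransmit
 * mod(103 - 100) = 3 consecutive packets, i.e. sequence numbers 101..103.
 */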
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
        struct sk_buff *skb, *tmp;
        struct sk_buff *next;
        unsigned int released = 0;
        struct net *net = n_ptr->net;
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tipc_bclink_lock(net);

        /* Bail out if tx queue is empty (no clean up is required) */
        skb = skb_peek(&tn->bcl->outqueue);
        if (!skb)
                goto exit;

        /* Determine which messages need to be acknowledged */
        if (acked == INVALID_LINK_SEQ) {
                /*
                 * Contact with specified node has been lost, so need to
                 * acknowledge sent messages only (if other nodes still exist)
                 * or both sent and unsent messages (otherwise)
                 */
                if (tn->bclink->bcast_nodes.count)
                        acked = tn->bcl->fsm_msg_cnt;
                else
                        acked = tn->bcl->next_out_no;
        } else {
                /*
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
                if (less(acked, buf_seqno(skb)) ||
                    less(tn->bcl->fsm_msg_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }

        /* Skip over packets that node has previously acknowledged */
        skb_queue_walk(&tn->bcl->outqueue, skb) {
                if (more(buf_seqno(skb), n_ptr->bclink.acked))
                        break;
        }

        /* Update packets that node is now acknowledging */
        skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;

                next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
                if (skb != tn->bcl->next_out) {
                        bcbuf_decr_acks(skb);
                } else {
                        bcbuf_set_acks(skb, 0);
                        tn->bcl->next_out = next;
                        bclink_set_last_sent(net);
                }

                if (bcbuf_acks(skb) == 0) {
                        __skb_unlink(skb, &tn->bcl->outqueue);
                        kfree_skb(skb);
                        released = 1;
                }
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */
        if (unlikely(tn->bcl->next_out)) {
                tipc_link_push_packets(tn->bcl);
                bclink_set_last_sent(net);
        }
        if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
                n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
        tipc_bclink_unlock(net);
}
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
                                   u32 last_sent)
{
        struct sk_buff *buf;
        struct net *net = n_ptr->net;
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        /* Ignore "stale" link state info */
        if (less_eq(last_sent, n_ptr->bclink.last_in))
                return;

        /* Update link synchronization state; quit if in sync */
        bclink_update_last_sent(n_ptr, last_sent);

        if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
                return;

        /* Update out-of-sync state; quit if loss is still unconfirmed */
        if ((++n_ptr->bclink.oos_state) == 1) {
                if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
                        return;
                n_ptr->bclink.oos_state++;
        }

        /* Don't NACK if one has been recently sent (or seen) */
        if (n_ptr->bclink.oos_state & 0x1)
                return;

        /* Send NACK */
        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                struct tipc_msg *msg = buf_msg(buf);
                struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
                u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

                tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tn->net_id);
                msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
                msg_set_bcgap_to(msg, to);

                tipc_bclink_lock(net);
                tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
                tn->bcl->stats.sent_nacks++;
                tipc_bclink_unlock(net);
                kfree_skb(buf);

                n_ptr->bclink.oos_state++;
        }
}
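
/* The oos_state counter above doubles as a NACK rate limiter: it is bumped
 * on every out-of-sync state update, a NACK is only sent when the resulting
 * value is even, and sending one bumps it again so the immediately following
 * update is skipped. This keeps a receiver from repeating the same NACK on
 * back-to-back link state updates.
 */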
/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
        struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

        if (unlikely(!n_ptr))
                return;

        tipc_node_lock(n_ptr);

        if (n_ptr->bclink.recv_permitted &&
            (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
            (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
                n_ptr->bclink.oos_state = 2;

        tipc_node_unlock(n_ptr);
}
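
/* Setting oos_state to 2 means the next state update increments it to an
 * odd value and therefore skips sending a NACK (see the parity test in
 * tipc_bclink_update_link_state()), deferring to the node whose NACK for
 * the same gap was just overheard.
 */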
/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;
        struct tipc_bclink *bclink = tn->bclink;
        int rc = 0;
        int bc = 0;
        struct sk_buff *skb;

        /* Prepare clone of message for local node */
        skb = tipc_msg_reassemble(list);
        if (unlikely(!skb)) {
                __skb_queue_purge(list);
                return -EHOSTUNREACH;
        }

        /* Broadcast to all other nodes */
        if (likely(bclink)) {
                tipc_bclink_lock(net);
                if (likely(bclink->bcast_nodes.count)) {
                        rc = __tipc_link_xmit(net, bcl, list);
                        if (likely(!rc)) {
                                u32 len = skb_queue_len(&bcl->outqueue);

                                bclink_set_last_sent(net);
                                bcl->stats.queue_sz_counts++;
                                bcl->stats.accu_queue_sz += len;
                        }
                        bc = 1;
                }
                tipc_bclink_unlock(net);
        }

        if (unlikely(!bc))
                __skb_queue_purge(list);

        /* Deliver message clone */
        if (likely(!rc))
                tipc_sk_mcast_rcv(net, skb);
        else
                kfree_skb(skb);

        return rc;
}
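
/* The chain is effectively delivered twice: the original buffers are queued
 * on the broadcast link for remote nodes, while the clone reassembled at the
 * top of the function is looped back to multicast sockets on this node via
 * tipc_sk_mcast_rcv().
 */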
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
        struct tipc_net *tn = net_generic(node->net, tipc_net_id);

        bclink_update_last_sent(node, seqno);
        node->bclink.last_in = seqno;
        node->bclink.oos_state = 0;
        tn->bcl->stats.recv_info++;

        /*
         * Unicast an ACK periodically, ensuring that
         * all nodes in the cluster don't ACK at the same time
         */
        if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
                tipc_link_proto_xmit(node->active_links[node->addr & 1],
                                     STATE_MSG, 0, 0, 0, 0, 0);
                tn->bcl->stats.sent_acks++;
        }
}
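
/* The modulo test above staggers the periodic ACKs: because the interval is
 * keyed to the receiver's own address, (seqno - own_addr) % TIPC_MIN_LINK_WIN
 * reaches zero at different sequence numbers on different nodes, so each node
 * acks every TIPC_MIN_LINK_WIN-th broadcast packet at its own offset instead
 * of all nodes acking the same packet at once.
 */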
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
        int deferred = 0;
        int pos = 0;
        struct sk_buff *iskb;
        struct sk_buff_head msgs;

        /* Screen out unwanted broadcast messages */
        if (msg_mc_netid(msg) != tn->net_id)
                goto exit;

        node = tipc_node_find(net, msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;

        tipc_node_lock(node);
        if (unlikely(!node->bclink.recv_permitted))
                goto unlock;

        /* Handle broadcast protocol message */
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_type(msg) != STATE_MSG)
                        goto unlock;
                if (msg_destnode(msg) == tn->own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        tipc_bclink_lock(net);
                        bcl->stats.recv_nacks++;
                        tn->bclink->retransmit_to = node;
                        bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        tipc_bclink_unlock(net);
                } else {
                        tipc_node_unlock(node);
                        bclink_peek_nack(net, msg);
                }
                goto exit;
        }

        /* Handle in-sequence broadcast message */
        seqno = msg_seqno(msg);
        next_in = mod(node->bclink.last_in + 1);

        if (likely(seqno == next_in)) {
receive:
                /* Deliver message to destination */
                if (likely(msg_isdata(msg))) {
                        tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                                tipc_sk_mcast_rcv(net, buf);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        while (tipc_msg_extract(buf, &iskb, &pos))
                                tipc_sk_mcast_rcv(net, iskb);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        tipc_buf_append(&node->bclink.reasm_buf, &buf);
                        if (unlikely(!buf && !node->bclink.reasm_buf))
                                goto unlock;
                        tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        bcl->stats.recv_fragments++;
                        if (buf) {
                                bcl->stats.recv_fragmented++;
                                msg = buf_msg(buf);
                                tipc_bclink_unlock(net);
                                goto receive;
                        }
                        tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
                        tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        skb_queue_head_init(&msgs);
                        skb_queue_tail(&msgs, buf);
                        tipc_named_rcv(net, &msgs);
                } else {
                        tipc_bclink_lock(net);
                        bclink_accept_pkt(node, seqno);
                        tipc_bclink_unlock(net);
                        tipc_node_unlock(node);
                        kfree_skb(buf);
                }
                buf = NULL;

                /* Determine new synchronization state */
                tipc_node_lock(node);
                if (unlikely(!tipc_node_is_up(node)))
                        goto unlock;

                if (node->bclink.last_in == node->bclink.last_sent)
                        goto unlock;

                if (skb_queue_empty(&node->bclink.deferred_queue)) {
                        node->bclink.oos_state = 1;
                        goto unlock;
                }

                msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
                seqno = msg_seqno(msg);
                next_in = mod(next_in + 1);
                if (seqno != next_in)
                        goto unlock;

                /* Take in-sequence message from deferred queue & deliver it */
                buf = __skb_dequeue(&node->bclink.deferred_queue);
                goto receive;
        }

        /* Handle out-of-sequence broadcast message */
        if (less(next_in, seqno)) {
                deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
                                               buf);
                bclink_update_last_sent(node, seqno);
                buf = NULL;
        }

        tipc_bclink_lock(net);

        if (deferred)
                bcl->stats.deferred_recv++;
        else
                bcl->stats.duplicates++;

        tipc_bclink_unlock(net);

unlock:
        tipc_node_unlock(node);
exit:
        kfree_skb(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
        return (n_ptr->bclink.recv_permitted &&
                (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
        int bp_index;
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bcbearer *bcbearer = tn->bcbearer;
        struct tipc_bclink *bclink = tn->bclink;

        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
         * preparation is skipped for broadcast link protocol messages
         * since they are sent in an unreliable manner and don't need it
         */
        if (likely(!msg_non_seq(buf_msg(buf)))) {
                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tn->net_id);
                tn->bcl->stats.sent_info++;

                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
                        return 0;
                }
        }

        /* Send buffer over bearers until all targets reached */
        bcbearer->remains = bclink->bcast_nodes;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
                struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
                struct tipc_bearer *bp[2] = {p, s};
                struct tipc_bearer *b = bp[msg_link_selector(msg)];
                struct sk_buff *tbuf;

                if (!p)
                        break; /* No more bearers to try */
                if (!b)
                        b = p;
                tipc_nmap_diff(&bcbearer->remains, &b->nodes,
                               &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue; /* Nothing added by bearer pair */

                if (bp_index == 0) {
                        /* Use original buffer for first bearer */
                        tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
                } else {
                        /* Avoid concurrent buffer access */
                        tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
                        if (!tbuf)
                                break;
                        tipc_bearer_send(net, b->identity, tbuf,
                                         &b->bcast_addr);
                        kfree_skb(tbuf); /* Bearer keeps a clone */
                }
                if (bcbearer->remains_new.count == 0)
                        break; /* All targets reached */

                bcbearer->remains = bcbearer->remains_new;
        }

        return 0;
}
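
/* Coverage check illustration: 'remains' starts as the full broadcast node
 * map and is reduced by the node set of each bearer pair that is used; the
 * loop stops early once 'remains_new' is empty (every target reachable via
 * an already-used bearer) or when no configured bearer pair adds coverage.
 */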
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
                        u32 node, bool action)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bcbearer *bcbearer = tn->bcbearer;
        struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct tipc_bcbearer_pair *bp_curr;
        struct tipc_bearer *b;
        int b_index;
        int pri;

        tipc_bclink_lock(net);

        if (action)
                tipc_nmap_add(nm_ptr, node);
        else
                tipc_nmap_remove(nm_ptr, node);

        /* Group bearers by priority (can assume max of two per priority) */
        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        rcu_read_lock();
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
                if (!b || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }
        rcu_read_unlock();

        /* Create array of bearer pairs for broadcasting */
        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
                                            &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        tipc_bclink_unlock(net);
}
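
/* After sorting, bcbearer->bpairs[] holds the usable primary/secondary bearer
 * pairs ordered from highest to lowest link priority, since the second loop
 * above walks bp_temp[] from TIPC_MAX_LINK_PRI downwards; a secondary bearer
 * is only paired with its primary when it spans exactly the same node set,
 * otherwise it gets a pair slot of its own.
 */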
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
                                      struct tipc_stats *stats)
{
        int i;
        struct nlattr *nest;

        struct nla_map {
                __u32 key;
                __u32 val;
        };

        struct nla_map map[] = {
                {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
                {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
                {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
                {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
                {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
                {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
                {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
                {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
                {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
                {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
                {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
                {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
                {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
                {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
                {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
                {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
                {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
                {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
                {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
                        (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
        };

        nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
        if (!nest)
                return -EMSGSIZE;

        for (i = 0; i < ARRAY_SIZE(map); i++)
                if (nla_put_u32(skb, map[i].key, map[i].val))
                        goto msg_full;

        nla_nest_end(skb, nest);

        return 0;
msg_full:
        nla_nest_cancel(skb, nest);

        return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
        int err;
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;

        if (!bcl)
                return 0;

        tipc_bclink_lock(net);

        hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
                          NLM_F_MULTI, TIPC_NL_LINK_GET);
        if (!hdr)
                return -EMSGSIZE;

        attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
        if (!attrs)
                goto msg_full;

        /* The broadcast link is always up */
        if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
                goto attr_msg_full;

        if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
                goto attr_msg_full;
        if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
                goto attr_msg_full;

        prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
        if (!prop)
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
                goto prop_msg_full;
        nla_nest_end(msg->skb, prop);

        err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
        if (err)
                goto attr_msg_full;

        tipc_bclink_unlock(net);
        nla_nest_end(msg->skb, attrs);
        genlmsg_end(msg->skb, hdr);

        return 0;

prop_msg_full:
        nla_nest_cancel(msg->skb, prop);
attr_msg_full:
        nla_nest_cancel(msg->skb, attrs);
msg_full:
        tipc_bclink_unlock(net);
        genlmsg_cancel(msg->skb, hdr);

        return -EMSGSIZE;
}
int tipc_bclink_stats(struct net *net, char *buf, const u32 buf_size)
{
        int ret;
        struct tipc_stats *s;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;

        if (!bcl)
                return 0;

        tipc_bclink_lock(net);

        s = &bcl->stats;

        ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
                            " Window:%u packets\n",
                            bcl->name, bcl->queue_limit[0]);
        ret += tipc_snprintf(buf + ret, buf_size - ret,
                             " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                             s->recv_info, s->recv_fragments,
                             s->recv_fragmented, s->recv_bundles,
                             s->recv_bundled);
        ret += tipc_snprintf(buf + ret, buf_size - ret,
                             " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                             s->sent_info, s->sent_fragments,
                             s->sent_fragmented, s->sent_bundles,
                             s->sent_bundled);
        ret += tipc_snprintf(buf + ret, buf_size - ret,
                             " RX naks:%u defs:%u dups:%u\n",
                             s->recv_nacks, s->deferred_recv, s->duplicates);
        ret += tipc_snprintf(buf + ret, buf_size - ret,
                             " TX naks:%u acks:%u dups:%u\n",
                             s->sent_nacks, s->sent_acks, s->retransmitted);
        ret += tipc_snprintf(buf + ret, buf_size - ret,
                             " Congestion link:%u Send queue max:%u avg:%u\n",
                             s->link_congs, s->max_queue_sz,
                             s->queue_sz_counts ?
                             (s->accu_queue_sz / s->queue_sz_counts) : 0);

        tipc_bclink_unlock(net);
        return ret;
}
int tipc_bclink_reset_stats(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;

        if (!bcl)
                return -ENOPROTOOPT;

        tipc_bclink_lock(net);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        tipc_bclink_unlock(net);
        return 0;
}
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;

        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        tipc_bclink_lock(net);
        tipc_link_set_queue_limits(bcl, limit);
        tipc_bclink_unlock(net);
        return 0;
}
int tipc_bclink_init(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bcbearer *bcbearer;
        struct tipc_bclink *bclink;
        struct tipc_link *bcl;

        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        if (!bcbearer)
                return -ENOMEM;

        bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
        if (!bclink) {
                kfree(bcbearer);
                return -ENOMEM;
        }

        bcl = &bclink->link;
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-broadcast");

        spin_lock_init(&bclink->lock);
        __skb_queue_head_init(&bcl->outqueue);
        __skb_queue_head_init(&bcl->deferred_queue);
        skb_queue_head_init(&bcl->wakeupq);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->owner->net = net;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
        rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
        bcl->state = WORKING_WORKING;
        bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
        msg_set_prevnode(bcl->pmsg, tn->own_addr);
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
        tn->bcbearer = bcbearer;
        tn->bclink = bclink;
        tn->bcl = bcl;
        return 0;
}
void tipc_bclink_stop(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tipc_bclink_lock(net);
        tipc_link_purge_queues(tn->bcl);
        tipc_bclink_unlock(net);

        RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
        synchronize_net();
        kfree(tn->bcbearer);
        kfree(tn->bclink);
}
/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) == 0) {
                nm_ptr->count++;
                nm_ptr->map[w] |= mask;
        }
}
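
/* Worked example of the map arithmetic above, assuming WSIZE is 32: a node
 * whose tipc_node() value is 70 maps to word w = 70 / 32 = 2 and
 * mask = 1 << (70 % 32) = 1 << 6, i.e. bit 6 of map[2] represents that node;
 * 'count' tracks how many distinct bits are set across the whole map.
 */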
/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) != 0) {
                nm_ptr->map[w] &= ~mask;
                nm_ptr->count--;
        }
}
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff)
{
        int stop = ARRAY_SIZE(nm_a->map);
        int w;
        int b;
        u32 map;

        memset(nm_diff, 0, sizeof(*nm_diff));
        for (w = 0; w < stop; w++) {
                map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
                nm_diff->map[w] = map;
                if (map != 0) {
                        for (b = 0 ; b < WSIZE; b++) {
                                if (map & (1 << b))
                                        nm_diff->count++;
                        }
                }
        }
}
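
/* The XOR/AND expression above is just a bitwise A & ~B: bits set in nm_a
 * but not in nm_b survive into nm_diff, and the inner loop counts the
 * surviving bits so nm_diff->count stays consistent with its map.
 */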
/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
        struct tipc_port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;

        for (; ; cnt -= item_sz, item = item->next) {
                if (cnt < PLSIZE)
                        item_sz = cnt;
                for (i = 0; i < item_sz; i++)
                        if (item->ports[i] == port)
                                return;
                if (i < PLSIZE) {
                        item->ports[i] = port;
                        pl_ptr->count++;
                        return;
                }
                if (!item->next) {
                        item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                        if (!item->next) {
                                pr_warn("Incomplete multicast delivery, no memory\n");
                                return;
                        }
                        item->next->next = NULL;
                }
        }
}
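
/* The port list is kept as a chain of fixed-size blocks of PLSIZE entries:
 * the walk above returns early if the port is already present, stores it in
 * the first free slot of the current block, and only allocates a new block
 * when the last one is full.
 */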
/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 *
 */
void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
        struct tipc_port_list *item;
        struct tipc_port_list *next;

        for (item = pl_ptr->next; item; item = next) {
                next = item->next;
                kfree(item);
        }
}