/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"
#include <linux/pkt_sched.h>
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]	= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]	= { .type = NLA_U32 }
};
/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};
/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000
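/* Note: with TIPC_NACK_INTV defined as twice the minimum link window, an
 * out-of-order packet stream triggers at most one NACK per TIPC_NACK_INTV
 * deferred packets; see the modulo test in tipc_link_build_nack_msg() below.
 */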
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe << 4,
	LINK_RESET		= 0x1 << 8,
	LINK_RESETTING		= 0x2 << 12,
	LINK_PEER_RESET		= 0xd << 16,
	LINK_FAILINGOVER	= 0xf << 20,
	LINK_SYNCHING		= 0xc << 24
};
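/* Note: each state value above occupies its own hex digit, so membership in a
 * set of states can be tested with a single bitwise AND, as done in
 * link_is_up(), tipc_link_is_reset() and tipc_link_is_blocked() below.
 */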
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	rcv_l->acked = snd_l->snd_nxt - 1;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		__skb_queue_purge(xmitq);
	}
}
int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}
static u32 link_own_addr(struct tipc_link *l)
{
	return msg_prevnode(l->pmsg);
}
/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;
	struct tipc_msg *hdr;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->pmsg = (struct tipc_msg *)&l->proto_msg;
	hdr = l->pmsg;
	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
	msg_set_size(hdr, sizeof(l->proto_msg));
	msg_set_session(hdr, session);
	msg_set_bearer_id(hdr, l->bearer_id);

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy((char *)msg_data(hdr), if_name);

	l->peer_caps = peer_caps;
	l->peer_session = WILDCARD_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	l->state = LINK_RESET;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	bool bc_up = link_is_up(l->bc_rcvlink);

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
		if (!l->silent_intv_cnt) {
			if (bc_up && (bc_acked != bc_snt))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}
void tipc_link_reset(struct tipc_link *l)
{
	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues and counters: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	l->bc_peer_is_up = false;
	link_reset_statistics(l);
}
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		seqno++;
	}
	l->snd_nxt = seqno;
}
static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}
/* tipc_link_input - process packet that has passed link protocol check
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}
/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}
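/* Note: the (rcv_nxt ^ own_addr) & 0xf test above staggers broadcast
 * acknowledgments: only roughly one packet in sixteen makes a given node
 * request a broadcast ack, and different nodes hit the condition on
 * different packets, so acks from all peers do not arrive at the sender
 * simultaneously.
 */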
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}
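/* A NACK is thus sent immediately for the first packet that lands in the
 * deferred queue, and afterwards at most once per TIPC_NACK_INTV additional
 * deferred packets, which bounds protocol traffic during loss bursts.
 */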
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_ack_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}
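/* Receive path summary: an in-sequence packet is delivered immediately and
 * the deferred queue is then drained for packets that have now become
 * in-sequence; an out-of-sequence packet is parked on the deferred queue and
 * may trigger a NACK via tipc_link_build_nack_msg().
 */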
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	bool node_up = link_is_up(l->bc_rcvlink);

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}
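/* The nack_state values form a small per-link state machine:
 * BC_NACK_SND_UNCONDITIONAL sends a NACK at the next opportunity,
 * BC_NACK_SND_CONDITIONAL delays it until the following synch message unless
 * the gap already exceeds TIPC_MIN_LINK_WIN, and BC_NACK_SND_SUPPRESS mutes
 * the next NACK right after one has been sent or observed from another node
 * (see also tipc_link_bc_nack_rcv()).
 */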
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == link_own_addr(l)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
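/* Illustration: with a send window of win == 50 the limits computed above
 * become 25 (LOW), 50 (MEDIUM), 75 (HIGH) and 100 (CRITICAL) packets, while
 * SYSTEM importance is capped by max_bulk, i.e. by how many publication
 * items fit into a single MTU-sized bulk message.
 */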
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}