/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>
struct tipc_stats {
	u32 sent_info;		/* used in counting # sent packets */
	u32 recv_info;		/* used in counting # recv'd packets */
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	char name[TIPC_MAX_LINK_NAME];

	/* Management and link supervision data */
	char if_name[TIPC_MAX_IF_NAME];

	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;

	struct tipc_stats stats;
};
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";
/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};
/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000
/* Link FSM states
 */
enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};
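/* Each FSM state above occupies its own bit pattern, so a set of states can
 * be tested with one bitwise AND. A minimal sketch of that pattern, assuming
 * a link pointer l (the state-checking helpers below use exactly this form):
 *
 *	if (l->state & (LINK_RESET | LINK_FAILINGOVER))
 *		return;		// link cannot carry traffic right now
 */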
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}
static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}
void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}
int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}
/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
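/* A hedged usage sketch (the caller context and the variable names b, onode
 * and the surrounding queues are assumptions, not taken from this file): a
 * typical caller fills in bearer and node parameters and receives the new
 * link through the **link out parameter.
 *
 *	struct tipc_link *l;
 *
 *	if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
 *			      b->net_plane, b->mtu, b->priority, b->window,
 *			      session, tipc_own_addr(net), onode, peer_caps,
 *			      bc_sndlink, bc_rcvlink, inputq, namedq, &l))
 *		return false;
 */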
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	return true;
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
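/* A hedged sketch of how the return value can be consumed (the caller shown
 * here is an assumption, not part of this file): the function returns a flag
 * mask, so a caller typically checks for TIPC_LINK_DOWN_EVT/TIPC_LINK_UP_EVT
 * and reacts accordingly.
 *
 *	rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		...	take the link down and notify the owning node ...
 */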
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (l->silent_intv_cnt > l->abort_limit)
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		mtyp = STATE_MSG;
		state = bc_acked != bc_snt;
		probe = l->silent_intv_cnt;
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);

	return rc;
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = tipc_own_addr(link->net);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}
void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stats.recv_info = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	tipc_link_reset_stats(l);
}
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}
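/* A hedged usage sketch (caller context assumed, not taken from this file):
 * tipc_link_xmit() only moves buffers between queues; the clones placed on
 * xmitq still have to be handed to the bearer by the caller.
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (rc == -ELINKCONG)
 *		...	window/backlog full, sender queued for wakeup ...
 *	tipc_bearer_xmit(net, bearer_id, &xmitq, &maddr);
 */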
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		seqno++;
	}
	l->snd_nxt = seqno;
}
static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "Resetting link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
		      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * the risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;
		return TIPC_LINK_SND_BC_ACK;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}
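/* Worked example of the staggering test above (illustrative): a broadcast
 * receiver only emits an ack when the low four bits of rcv_nxt XOR:ed with
 * the low four bits of its own node address equal 0xf. A node whose address
 * ends in 0x3 therefore acks only when rcv_nxt ends in 0xc (0x3 ^ 0xc = 0xf),
 * i.e. once every 16 packets, and nodes with different low nibbles ack at
 * different packet numbers, spreading the acks towards the sender.
 */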
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
				     struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;

	if (link_is_bc_rcvlink(l))
		return;

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(l->bc_rcvlink);

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      TIPC_MAX_IF_NAME, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_size(hdr, INT_H_SIZE);
		msg_set_probe(hdr, probe);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(msg_data(hdr), l->if_name);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	int mtyp = msg_type(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
					   TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			   struct sk_buff_head *xmitq)
{
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (!link_is_up(l))
		return;

	if (!msg_peer_node_is_up(hdr))
		return;

	l->bc_peer_is_up = true;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return;

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
}
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
/**
 * link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
	if (!link_is_bc_sndlink(l)) {
		l->stats.sent_info = l->snd_nxt;
		l->stats.recv_info = l->rcv_nxt;
	}
}
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
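/* A hedged usage sketch (the surrounding variables are assumptions, not part
 * of this file): a netlink handler validates the nested properties first and
 * only then reads the individual attributes.
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	u32 tol;
 *	int err;
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
 */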
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
}
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}