2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
42 #include "name_distr.h"
46 #include <linux/pkt_sched.h>
49 * Error message prefixes
51 static const char *link_co_err
= "Link tunneling error, ";
52 static const char *link_rst_msg
= "Resetting link ";
54 static const struct nla_policy tipc_nl_link_policy
[TIPC_NLA_LINK_MAX
+ 1] = {
55 [TIPC_NLA_LINK_UNSPEC
] = { .type
= NLA_UNSPEC
},
56 [TIPC_NLA_LINK_NAME
] = {
58 .len
= TIPC_MAX_LINK_NAME
60 [TIPC_NLA_LINK_MTU
] = { .type
= NLA_U32
},
61 [TIPC_NLA_LINK_BROADCAST
] = { .type
= NLA_FLAG
},
62 [TIPC_NLA_LINK_UP
] = { .type
= NLA_FLAG
},
63 [TIPC_NLA_LINK_ACTIVE
] = { .type
= NLA_FLAG
},
64 [TIPC_NLA_LINK_PROP
] = { .type
= NLA_NESTED
},
65 [TIPC_NLA_LINK_STATS
] = { .type
= NLA_NESTED
},
66 [TIPC_NLA_LINK_RX
] = { .type
= NLA_U32
},
67 [TIPC_NLA_LINK_TX
] = { .type
= NLA_U32
}
70 /* Properties valid for media, bearer and link */
71 static const struct nla_policy tipc_nl_prop_policy
[TIPC_NLA_PROP_MAX
+ 1] = {
72 [TIPC_NLA_PROP_UNSPEC
] = { .type
= NLA_UNSPEC
},
73 [TIPC_NLA_PROP_PRIO
] = { .type
= NLA_U32
},
74 [TIPC_NLA_PROP_TOL
] = { .type
= NLA_U32
},
75 [TIPC_NLA_PROP_WIN
] = { .type
= NLA_U32
}
79 * Interval between NACKs when packets arrive out of order
81 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
83 * Out-of-range value for link session numbers
85 #define WILDCARD_SESSION 0x10000
90 LINK_ESTABLISHED
= 0xe,
91 LINK_ESTABLISHING
= 0xe << 4,
92 LINK_RESET
= 0x1 << 8,
93 LINK_RESETTING
= 0x2 << 12,
94 LINK_PEER_RESET
= 0xd << 16,
95 LINK_FAILINGOVER
= 0xf << 20,
96 LINK_SYNCHING
= 0xc << 24
99 /* Link FSM state checking routines
101 static int link_is_up(struct tipc_link
*l
)
103 return l
->state
& (LINK_ESTABLISHED
| LINK_SYNCHING
);
106 static int tipc_link_proto_rcv(struct tipc_link
*l
, struct sk_buff
*skb
,
107 struct sk_buff_head
*xmitq
);
108 static void tipc_link_build_proto_msg(struct tipc_link
*l
, int mtyp
, bool probe
,
109 u16 rcvgap
, int tolerance
, int priority
,
110 struct sk_buff_head
*xmitq
);
111 static void link_reset_statistics(struct tipc_link
*l_ptr
);
112 static void link_print(struct tipc_link
*l_ptr
, const char *str
);
113 static void tipc_link_sync_rcv(struct tipc_node
*n
, struct sk_buff
*buf
);
116 * Simple non-static link routines (i.e. referenced outside this file)
118 bool tipc_link_is_up(struct tipc_link
*l
)
120 return link_is_up(l
);
123 bool tipc_link_is_reset(struct tipc_link
*l
)
125 return l
->state
& (LINK_RESET
| LINK_FAILINGOVER
| LINK_ESTABLISHING
);
128 bool tipc_link_is_synching(struct tipc_link
*l
)
130 return l
->state
== LINK_SYNCHING
;
133 bool tipc_link_is_failingover(struct tipc_link
*l
)
135 return l
->state
== LINK_FAILINGOVER
;
138 bool tipc_link_is_blocked(struct tipc_link
*l
)
140 return l
->state
& (LINK_RESETTING
| LINK_PEER_RESET
| LINK_FAILINGOVER
);
143 int tipc_link_is_active(struct tipc_link
*l
)
145 struct tipc_node
*n
= l
->owner
;
147 return (node_active_link(n
, 0) == l
) || (node_active_link(n
, 1) == l
);
150 static u32
link_own_addr(struct tipc_link
*l
)
152 return msg_prevnode(l
->pmsg
);
156 * tipc_link_create - create a new link
157 * @n: pointer to associated node
158 * @b: pointer to associated bearer
159 * @ownnode: identity of own node
160 * @peer: identity of peer node
161 * @maddr: media address to be used
162 * @inputq: queue to put messages ready for delivery
163 * @namedq: queue to put binding table update messages ready for delivery
164 * @link: return value, pointer to put the created link
166 * Returns true if link was created, otherwise false
168 bool tipc_link_create(struct tipc_node
*n
, struct tipc_bearer
*b
, u32 session
,
169 u32 ownnode
, u32 peer
, struct tipc_media_addr
*maddr
,
170 struct sk_buff_head
*inputq
, struct sk_buff_head
*namedq
,
171 struct tipc_link
**link
)
174 struct tipc_msg
*hdr
;
177 l
= kzalloc(sizeof(*l
), GFP_ATOMIC
);
182 /* Note: peer i/f name is completed by reset/activate message */
183 if_name
= strchr(b
->name
, ':') + 1;
184 sprintf(l
->name
, "%u.%u.%u:%s-%u.%u.%u:unknown",
185 tipc_zone(ownnode
), tipc_cluster(ownnode
), tipc_node(ownnode
),
186 if_name
, tipc_zone(peer
), tipc_cluster(peer
), tipc_node(peer
));
189 l
->media_addr
= maddr
;
191 l
->peer_session
= WILDCARD_SESSION
;
192 l
->bearer_id
= b
->identity
;
193 l
->tolerance
= b
->tolerance
;
194 l
->net_plane
= b
->net_plane
;
195 l
->advertised_mtu
= b
->mtu
;
197 l
->priority
= b
->priority
;
198 tipc_link_set_queue_limits(l
, b
->window
);
201 l
->state
= LINK_RESETTING
;
202 l
->pmsg
= (struct tipc_msg
*)&l
->proto_msg
;
204 tipc_msg_init(ownnode
, hdr
, LINK_PROTOCOL
, RESET_MSG
, INT_H_SIZE
, peer
);
205 msg_set_size(hdr
, sizeof(l
->proto_msg
));
206 msg_set_session(hdr
, session
);
207 msg_set_bearer_id(hdr
, l
->bearer_id
);
208 strcpy((char *)msg_data(hdr
), if_name
);
209 __skb_queue_head_init(&l
->transmq
);
210 __skb_queue_head_init(&l
->backlogq
);
211 __skb_queue_head_init(&l
->deferdq
);
212 skb_queue_head_init(&l
->wakeupq
);
213 skb_queue_head_init(l
->inputq
);
217 /* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
219 * Give a newly added peer node the sequence number where it should
220 * start receiving and acking broadcast packets.
222 void tipc_link_build_bcast_sync_msg(struct tipc_link
*l
,
223 struct sk_buff_head
*xmitq
)
226 struct sk_buff_head list
;
229 skb
= tipc_msg_create(BCAST_PROTOCOL
, STATE_MSG
, INT_H_SIZE
,
230 0, l
->addr
, link_own_addr(l
), 0, 0, 0);
233 last_sent
= tipc_bclink_get_last_sent(l
->owner
->net
);
234 msg_set_last_bcast(buf_msg(skb
), last_sent
);
235 __skb_queue_head_init(&list
);
236 __skb_queue_tail(&list
, skb
);
237 tipc_link_xmit(l
, &list
, xmitq
);
241 * tipc_link_fsm_evt - link finite state machine
242 * @l: pointer to link
243 * @evt: state machine event to be processed
245 int tipc_link_fsm_evt(struct tipc_link
*l
, int evt
)
252 case LINK_PEER_RESET_EVT
:
253 l
->state
= LINK_PEER_RESET
;
256 l
->state
= LINK_RESET
;
258 case LINK_FAILURE_EVT
:
259 case LINK_FAILOVER_BEGIN_EVT
:
260 case LINK_ESTABLISH_EVT
:
261 case LINK_FAILOVER_END_EVT
:
262 case LINK_SYNCH_BEGIN_EVT
:
263 case LINK_SYNCH_END_EVT
:
270 case LINK_PEER_RESET_EVT
:
271 l
->state
= LINK_ESTABLISHING
;
273 case LINK_FAILOVER_BEGIN_EVT
:
274 l
->state
= LINK_FAILINGOVER
;
275 case LINK_FAILURE_EVT
:
277 case LINK_ESTABLISH_EVT
:
278 case LINK_FAILOVER_END_EVT
:
280 case LINK_SYNCH_BEGIN_EVT
:
281 case LINK_SYNCH_END_EVT
:
286 case LINK_PEER_RESET
:
289 l
->state
= LINK_ESTABLISHING
;
291 case LINK_PEER_RESET_EVT
:
292 case LINK_ESTABLISH_EVT
:
293 case LINK_FAILURE_EVT
:
295 case LINK_SYNCH_BEGIN_EVT
:
296 case LINK_SYNCH_END_EVT
:
297 case LINK_FAILOVER_BEGIN_EVT
:
298 case LINK_FAILOVER_END_EVT
:
303 case LINK_FAILINGOVER
:
305 case LINK_FAILOVER_END_EVT
:
306 l
->state
= LINK_RESET
;
308 case LINK_PEER_RESET_EVT
:
310 case LINK_ESTABLISH_EVT
:
311 case LINK_FAILURE_EVT
:
313 case LINK_FAILOVER_BEGIN_EVT
:
314 case LINK_SYNCH_BEGIN_EVT
:
315 case LINK_SYNCH_END_EVT
:
320 case LINK_ESTABLISHING
:
322 case LINK_ESTABLISH_EVT
:
323 l
->state
= LINK_ESTABLISHED
;
324 rc
|= TIPC_LINK_UP_EVT
;
326 case LINK_FAILOVER_BEGIN_EVT
:
327 l
->state
= LINK_FAILINGOVER
;
329 case LINK_PEER_RESET_EVT
:
331 case LINK_FAILURE_EVT
:
332 case LINK_SYNCH_BEGIN_EVT
:
333 case LINK_FAILOVER_END_EVT
:
335 case LINK_SYNCH_END_EVT
:
340 case LINK_ESTABLISHED
:
342 case LINK_PEER_RESET_EVT
:
343 l
->state
= LINK_PEER_RESET
;
344 rc
|= TIPC_LINK_DOWN_EVT
;
346 case LINK_FAILURE_EVT
:
347 l
->state
= LINK_RESETTING
;
348 rc
|= TIPC_LINK_DOWN_EVT
;
351 l
->state
= LINK_RESET
;
353 case LINK_ESTABLISH_EVT
:
354 case LINK_SYNCH_END_EVT
:
356 case LINK_SYNCH_BEGIN_EVT
:
357 l
->state
= LINK_SYNCHING
;
359 case LINK_FAILOVER_BEGIN_EVT
:
360 case LINK_FAILOVER_END_EVT
:
367 case LINK_PEER_RESET_EVT
:
368 l
->state
= LINK_PEER_RESET
;
369 rc
|= TIPC_LINK_DOWN_EVT
;
371 case LINK_FAILURE_EVT
:
372 l
->state
= LINK_RESETTING
;
373 rc
|= TIPC_LINK_DOWN_EVT
;
376 l
->state
= LINK_RESET
;
378 case LINK_ESTABLISH_EVT
:
379 case LINK_SYNCH_BEGIN_EVT
:
381 case LINK_SYNCH_END_EVT
:
382 l
->state
= LINK_ESTABLISHED
;
384 case LINK_FAILOVER_BEGIN_EVT
:
385 case LINK_FAILOVER_END_EVT
:
391 pr_err("Unknown FSM state %x in %s\n", l
->state
, l
->name
);
395 pr_err("Illegal FSM event %x in state %x on link %s\n",
396 evt
, l
->state
, l
->name
);
400 /* link_profile_stats - update statistical profiling of traffic
402 static void link_profile_stats(struct tipc_link
*l
)
405 struct tipc_msg
*msg
;
408 /* Update counters used in statistical profiling of send traffic */
409 l
->stats
.accu_queue_sz
+= skb_queue_len(&l
->transmq
);
410 l
->stats
.queue_sz_counts
++;
412 skb
= skb_peek(&l
->transmq
);
416 length
= msg_size(msg
);
418 if (msg_user(msg
) == MSG_FRAGMENTER
) {
419 if (msg_type(msg
) != FIRST_FRAGMENT
)
421 length
= msg_size(msg_get_wrapped(msg
));
423 l
->stats
.msg_lengths_total
+= length
;
424 l
->stats
.msg_length_counts
++;
426 l
->stats
.msg_length_profile
[0]++;
427 else if (length
<= 256)
428 l
->stats
.msg_length_profile
[1]++;
429 else if (length
<= 1024)
430 l
->stats
.msg_length_profile
[2]++;
431 else if (length
<= 4096)
432 l
->stats
.msg_length_profile
[3]++;
433 else if (length
<= 16384)
434 l
->stats
.msg_length_profile
[4]++;
435 else if (length
<= 32768)
436 l
->stats
.msg_length_profile
[5]++;
438 l
->stats
.msg_length_profile
[6]++;
441 /* tipc_link_timeout - perform periodic task as instructed from node timeout
443 int tipc_link_timeout(struct tipc_link
*l
, struct sk_buff_head
*xmitq
)
446 int mtyp
= STATE_MSG
;
450 link_profile_stats(l
);
453 case LINK_ESTABLISHED
:
455 if (!l
->silent_intv_cnt
) {
456 if (tipc_bclink_acks_missing(l
->owner
))
458 } else if (l
->silent_intv_cnt
<= l
->abort_limit
) {
462 rc
|= tipc_link_fsm_evt(l
, LINK_FAILURE_EVT
);
464 l
->silent_intv_cnt
++;
470 case LINK_ESTABLISHING
:
474 case LINK_PEER_RESET
:
476 case LINK_FAILINGOVER
:
483 tipc_link_build_proto_msg(l
, mtyp
, prb
, 0, 0, 0, xmitq
);
489 * link_schedule_user - schedule a message sender for wakeup after congestion
490 * @link: congested link
491 * @list: message that was attempted sent
492 * Create pseudo msg to send back to user when congestion abates
493 * Does not consume buffer list
495 static int link_schedule_user(struct tipc_link
*link
, struct sk_buff_head
*list
)
497 struct tipc_msg
*msg
= buf_msg(skb_peek(list
));
498 int imp
= msg_importance(msg
);
499 u32 oport
= msg_origport(msg
);
500 u32 addr
= link_own_addr(link
);
503 /* This really cannot happen... */
504 if (unlikely(imp
> TIPC_CRITICAL_IMPORTANCE
)) {
505 pr_warn("%s<%s>, send queue full", link_rst_msg
, link
->name
);
508 /* Non-blocking sender: */
509 if (TIPC_SKB_CB(skb_peek(list
))->wakeup_pending
)
512 /* Create and schedule wakeup pseudo message */
513 skb
= tipc_msg_create(SOCK_WAKEUP
, 0, INT_H_SIZE
, 0,
514 addr
, addr
, oport
, 0, 0);
517 TIPC_SKB_CB(skb
)->chain_sz
= skb_queue_len(list
);
518 TIPC_SKB_CB(skb
)->chain_imp
= imp
;
519 skb_queue_tail(&link
->wakeupq
, skb
);
520 link
->stats
.link_congs
++;
525 * link_prepare_wakeup - prepare users for wakeup after congestion
526 * @link: congested link
527 * Move a number of waiting users, as permitted by available space in
528 * the send queue, from link wait queue to node wait queue for wakeup
530 void link_prepare_wakeup(struct tipc_link
*l
)
532 int pnd
[TIPC_SYSTEM_IMPORTANCE
+ 1] = {0,};
534 struct sk_buff
*skb
, *tmp
;
536 skb_queue_walk_safe(&l
->wakeupq
, skb
, tmp
) {
537 imp
= TIPC_SKB_CB(skb
)->chain_imp
;
538 lim
= l
->window
+ l
->backlog
[imp
].limit
;
539 pnd
[imp
] += TIPC_SKB_CB(skb
)->chain_sz
;
540 if ((pnd
[imp
] + l
->backlog
[imp
].len
) >= lim
)
542 skb_unlink(skb
, &l
->wakeupq
);
543 skb_queue_tail(l
->inputq
, skb
);
548 * tipc_link_reset_fragments - purge link's inbound message fragments queue
549 * @l_ptr: pointer to link
551 void tipc_link_reset_fragments(struct tipc_link
*l_ptr
)
553 kfree_skb(l_ptr
->reasm_buf
);
554 l_ptr
->reasm_buf
= NULL
;
557 void tipc_link_purge_backlog(struct tipc_link
*l
)
559 __skb_queue_purge(&l
->backlogq
);
560 l
->backlog
[TIPC_LOW_IMPORTANCE
].len
= 0;
561 l
->backlog
[TIPC_MEDIUM_IMPORTANCE
].len
= 0;
562 l
->backlog
[TIPC_HIGH_IMPORTANCE
].len
= 0;
563 l
->backlog
[TIPC_CRITICAL_IMPORTANCE
].len
= 0;
564 l
->backlog
[TIPC_SYSTEM_IMPORTANCE
].len
= 0;
568 * tipc_link_purge_queues - purge all pkt queues associated with link
569 * @l_ptr: pointer to link
571 void tipc_link_purge_queues(struct tipc_link
*l_ptr
)
573 __skb_queue_purge(&l_ptr
->deferdq
);
574 __skb_queue_purge(&l_ptr
->transmq
);
575 tipc_link_purge_backlog(l_ptr
);
576 tipc_link_reset_fragments(l_ptr
);
579 void tipc_link_reset(struct tipc_link
*l
)
581 tipc_link_fsm_evt(l
, LINK_RESET_EVT
);
583 /* Link is down, accept any session */
584 l
->peer_session
= WILDCARD_SESSION
;
586 /* If peer is up, it only accepts an incremented session number */
587 msg_set_session(l
->pmsg
, msg_session(l
->pmsg
) + 1);
589 /* Prepare for renewed mtu size negotiation */
590 l
->mtu
= l
->advertised_mtu
;
592 /* Clean up all queues: */
593 __skb_queue_purge(&l
->transmq
);
594 __skb_queue_purge(&l
->deferdq
);
595 skb_queue_splice_init(&l
->wakeupq
, l
->inputq
);
597 tipc_link_purge_backlog(l
);
598 kfree_skb(l
->reasm_buf
);
599 kfree_skb(l
->failover_reasm_skb
);
601 l
->failover_reasm_skb
= NULL
;
605 l
->silent_intv_cnt
= 0;
606 l
->stats
.recv_info
= 0;
608 link_reset_statistics(l
);
612 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
614 * @list: chain of buffers containing message
616 * Consumes the buffer chain, except when returning an error code,
617 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
618 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
620 int __tipc_link_xmit(struct net
*net
, struct tipc_link
*link
,
621 struct sk_buff_head
*list
)
623 struct tipc_msg
*msg
= buf_msg(skb_peek(list
));
624 unsigned int maxwin
= link
->window
;
625 unsigned int i
, imp
= msg_importance(msg
);
626 uint mtu
= link
->mtu
;
627 u16 ack
= mod(link
->rcv_nxt
- 1);
628 u16 seqno
= link
->snd_nxt
;
629 u16 bc_last_in
= link
->owner
->bclink
.last_in
;
630 struct tipc_media_addr
*addr
= link
->media_addr
;
631 struct sk_buff_head
*transmq
= &link
->transmq
;
632 struct sk_buff_head
*backlogq
= &link
->backlogq
;
633 struct sk_buff
*skb
, *bskb
;
635 /* Match msg importance against this and all higher backlog limits: */
636 for (i
= imp
; i
<= TIPC_SYSTEM_IMPORTANCE
; i
++) {
637 if (unlikely(link
->backlog
[i
].len
>= link
->backlog
[i
].limit
))
638 return link_schedule_user(link
, list
);
640 if (unlikely(msg_size(msg
) > mtu
))
643 /* Prepare each packet for sending, and add to relevant queue: */
644 while (skb_queue_len(list
)) {
645 skb
= skb_peek(list
);
647 msg_set_seqno(msg
, seqno
);
648 msg_set_ack(msg
, ack
);
649 msg_set_bcast_ack(msg
, bc_last_in
);
651 if (likely(skb_queue_len(transmq
) < maxwin
)) {
653 __skb_queue_tail(transmq
, skb
);
654 tipc_bearer_send(net
, link
->bearer_id
, skb
, addr
);
655 link
->rcv_unacked
= 0;
659 if (tipc_msg_bundle(skb_peek_tail(backlogq
), msg
, mtu
)) {
660 kfree_skb(__skb_dequeue(list
));
661 link
->stats
.sent_bundled
++;
664 if (tipc_msg_make_bundle(&bskb
, msg
, mtu
, link
->addr
)) {
665 kfree_skb(__skb_dequeue(list
));
666 __skb_queue_tail(backlogq
, bskb
);
667 link
->backlog
[msg_importance(buf_msg(bskb
))].len
++;
668 link
->stats
.sent_bundled
++;
669 link
->stats
.sent_bundles
++;
672 link
->backlog
[imp
].len
+= skb_queue_len(list
);
673 skb_queue_splice_tail_init(list
, backlogq
);
675 link
->snd_nxt
= seqno
;
680 * tipc_link_xmit(): enqueue buffer list according to queue situation
682 * @list: chain of buffers containing message
683 * @xmitq: returned list of packets to be sent by caller
685 * Consumes the buffer chain, except when returning -ELINKCONG,
686 * since the caller then may want to make more send attempts.
687 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
688 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
690 int tipc_link_xmit(struct tipc_link
*l
, struct sk_buff_head
*list
,
691 struct sk_buff_head
*xmitq
)
693 struct tipc_msg
*hdr
= buf_msg(skb_peek(list
));
694 unsigned int maxwin
= l
->window
;
695 unsigned int i
, imp
= msg_importance(hdr
);
696 unsigned int mtu
= l
->mtu
;
697 u16 ack
= l
->rcv_nxt
- 1;
698 u16 seqno
= l
->snd_nxt
;
699 u16 bc_last_in
= l
->owner
->bclink
.last_in
;
700 struct sk_buff_head
*transmq
= &l
->transmq
;
701 struct sk_buff_head
*backlogq
= &l
->backlogq
;
702 struct sk_buff
*skb
, *_skb
, *bskb
;
704 /* Match msg importance against this and all higher backlog limits: */
705 for (i
= imp
; i
<= TIPC_SYSTEM_IMPORTANCE
; i
++) {
706 if (unlikely(l
->backlog
[i
].len
>= l
->backlog
[i
].limit
))
707 return link_schedule_user(l
, list
);
709 if (unlikely(msg_size(hdr
) > mtu
))
712 /* Prepare each packet for sending, and add to relevant queue: */
713 while (skb_queue_len(list
)) {
714 skb
= skb_peek(list
);
716 msg_set_seqno(hdr
, seqno
);
717 msg_set_ack(hdr
, ack
);
718 msg_set_bcast_ack(hdr
, bc_last_in
);
720 if (likely(skb_queue_len(transmq
) < maxwin
)) {
721 _skb
= skb_clone(skb
, GFP_ATOMIC
);
725 __skb_queue_tail(transmq
, skb
);
726 __skb_queue_tail(xmitq
, _skb
);
731 if (tipc_msg_bundle(skb_peek_tail(backlogq
), hdr
, mtu
)) {
732 kfree_skb(__skb_dequeue(list
));
733 l
->stats
.sent_bundled
++;
736 if (tipc_msg_make_bundle(&bskb
, hdr
, mtu
, l
->addr
)) {
737 kfree_skb(__skb_dequeue(list
));
738 __skb_queue_tail(backlogq
, bskb
);
739 l
->backlog
[msg_importance(buf_msg(bskb
))].len
++;
740 l
->stats
.sent_bundled
++;
741 l
->stats
.sent_bundles
++;
744 l
->backlog
[imp
].len
+= skb_queue_len(list
);
745 skb_queue_splice_tail_init(list
, backlogq
);
752 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
753 * Receive the sequence number where we should start receiving and
754 * acking broadcast packets from a newly added peer node, and open
755 * up for reception of such packets.
757 * Called with node locked
759 static void tipc_link_sync_rcv(struct tipc_node
*n
, struct sk_buff
*buf
)
761 struct tipc_msg
*msg
= buf_msg(buf
);
763 n
->bclink
.last_sent
= n
->bclink
.last_in
= msg_last_bcast(msg
);
764 n
->bclink
.recv_permitted
= true;
769 * tipc_link_push_packets - push unsent packets to bearer
771 * Push out the unsent messages of a link where congestion
772 * has abated. Node is locked.
774 * Called with node locked
776 void tipc_link_push_packets(struct tipc_link
*link
)
779 struct tipc_msg
*msg
;
780 u16 seqno
= link
->snd_nxt
;
781 u16 ack
= mod(link
->rcv_nxt
- 1);
783 while (skb_queue_len(&link
->transmq
) < link
->window
) {
784 skb
= __skb_dequeue(&link
->backlogq
);
788 link
->backlog
[msg_importance(msg
)].len
--;
789 msg_set_ack(msg
, ack
);
790 msg_set_seqno(msg
, seqno
);
791 seqno
= mod(seqno
+ 1);
792 msg_set_bcast_ack(msg
, link
->owner
->bclink
.last_in
);
793 link
->rcv_unacked
= 0;
794 __skb_queue_tail(&link
->transmq
, skb
);
795 tipc_bearer_send(link
->owner
->net
, link
->bearer_id
,
796 skb
, link
->media_addr
);
798 link
->snd_nxt
= seqno
;
801 void tipc_link_advance_backlog(struct tipc_link
*l
, struct sk_buff_head
*xmitq
)
803 struct sk_buff
*skb
, *_skb
;
804 struct tipc_msg
*hdr
;
805 u16 seqno
= l
->snd_nxt
;
806 u16 ack
= l
->rcv_nxt
- 1;
808 while (skb_queue_len(&l
->transmq
) < l
->window
) {
809 skb
= skb_peek(&l
->backlogq
);
812 _skb
= skb_clone(skb
, GFP_ATOMIC
);
815 __skb_dequeue(&l
->backlogq
);
817 l
->backlog
[msg_importance(hdr
)].len
--;
818 __skb_queue_tail(&l
->transmq
, skb
);
819 __skb_queue_tail(xmitq
, _skb
);
820 msg_set_ack(hdr
, ack
);
821 msg_set_seqno(hdr
, seqno
);
822 msg_set_bcast_ack(hdr
, l
->owner
->bclink
.last_in
);
829 static void link_retransmit_failure(struct tipc_link
*l_ptr
,
832 struct tipc_msg
*msg
= buf_msg(buf
);
833 struct net
*net
= l_ptr
->owner
->net
;
835 pr_warn("Retransmission failure on link <%s>\n", l_ptr
->name
);
838 /* Handle failure on standard link */
839 link_print(l_ptr
, "Resetting link ");
840 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
841 msg_user(msg
), msg_type(msg
), msg_size(msg
),
843 pr_info("sqno %u, prev: %x, src: %x\n",
844 msg_seqno(msg
), msg_prevnode(msg
), msg_orignode(msg
));
846 /* Handle failure on broadcast link */
847 struct tipc_node
*n_ptr
;
848 char addr_string
[16];
850 pr_info("Msg seq number: %u, ", msg_seqno(msg
));
851 pr_cont("Outstanding acks: %lu\n",
852 (unsigned long) TIPC_SKB_CB(buf
)->handle
);
854 n_ptr
= tipc_bclink_retransmit_to(net
);
856 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
857 pr_info("Broadcast link info for %s\n", addr_string
);
858 pr_info("Reception permitted: %d, Acked: %u\n",
859 n_ptr
->bclink
.recv_permitted
,
860 n_ptr
->bclink
.acked
);
861 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
862 n_ptr
->bclink
.last_in
,
863 n_ptr
->bclink
.oos_state
,
864 n_ptr
->bclink
.last_sent
);
866 n_ptr
->action_flags
|= TIPC_BCAST_RESET
;
867 l_ptr
->stale_count
= 0;
871 void tipc_link_retransmit(struct tipc_link
*l_ptr
, struct sk_buff
*skb
,
874 struct tipc_msg
*msg
;
881 /* Detect repeated retransmit failures */
882 if (l_ptr
->last_retransm
== msg_seqno(msg
)) {
883 if (++l_ptr
->stale_count
> 100) {
884 link_retransmit_failure(l_ptr
, skb
);
888 l_ptr
->last_retransm
= msg_seqno(msg
);
889 l_ptr
->stale_count
= 1;
892 skb_queue_walk_from(&l_ptr
->transmq
, skb
) {
896 msg_set_ack(msg
, mod(l_ptr
->rcv_nxt
- 1));
897 msg_set_bcast_ack(msg
, l_ptr
->owner
->bclink
.last_in
);
898 tipc_bearer_send(l_ptr
->owner
->net
, l_ptr
->bearer_id
, skb
,
901 l_ptr
->stats
.retransmitted
++;
905 static int tipc_link_retransm(struct tipc_link
*l
, int retransm
,
906 struct sk_buff_head
*xmitq
)
908 struct sk_buff
*_skb
, *skb
= skb_peek(&l
->transmq
);
909 struct tipc_msg
*hdr
;
914 /* Detect repeated retransmit failures on same packet */
915 if (likely(l
->last_retransm
!= buf_seqno(skb
))) {
916 l
->last_retransm
= buf_seqno(skb
);
918 } else if (++l
->stale_count
> 100) {
919 link_retransmit_failure(l
, skb
);
920 return tipc_link_fsm_evt(l
, LINK_FAILURE_EVT
);
922 skb_queue_walk(&l
->transmq
, skb
) {
926 _skb
= __pskb_copy(skb
, MIN_H_SIZE
, GFP_ATOMIC
);
930 msg_set_ack(hdr
, l
->rcv_nxt
- 1);
931 msg_set_bcast_ack(hdr
, l
->owner
->bclink
.last_in
);
932 _skb
->priority
= TC_PRIO_CONTROL
;
933 __skb_queue_tail(xmitq
, _skb
);
935 l
->stats
.retransmitted
++;
940 /* tipc_data_input - deliver data and name distr msgs to upper layer
942 * Consumes buffer if message is of right type
943 * Node lock must be held
945 static bool tipc_data_input(struct tipc_link
*link
, struct sk_buff
*skb
,
946 struct sk_buff_head
*inputq
)
948 struct tipc_node
*node
= link
->owner
;
950 switch (msg_user(buf_msg(skb
))) {
951 case TIPC_LOW_IMPORTANCE
:
952 case TIPC_MEDIUM_IMPORTANCE
:
953 case TIPC_HIGH_IMPORTANCE
:
954 case TIPC_CRITICAL_IMPORTANCE
:
956 __skb_queue_tail(inputq
, skb
);
958 case NAME_DISTRIBUTOR
:
959 node
->bclink
.recv_permitted
= true;
960 skb_queue_tail(link
->namedq
, skb
);
963 case TUNNEL_PROTOCOL
:
968 pr_warn("Dropping received illegal msg type\n");
974 /* tipc_link_input - process packet that has passed link protocol check
978 static int tipc_link_input(struct tipc_link
*l
, struct sk_buff
*skb
,
979 struct sk_buff_head
*inputq
)
981 struct tipc_node
*node
= l
->owner
;
982 struct tipc_msg
*hdr
= buf_msg(skb
);
983 struct sk_buff
**reasm_skb
= &l
->reasm_buf
;
984 struct sk_buff
*iskb
;
985 int usr
= msg_user(hdr
);
990 if (unlikely(usr
== TUNNEL_PROTOCOL
)) {
991 if (msg_type(hdr
) == SYNCH_MSG
) {
992 __skb_queue_purge(&l
->deferdq
);
995 if (!tipc_msg_extract(skb
, &iskb
, &ipos
))
1000 if (less(msg_seqno(hdr
), l
->drop_point
))
1002 if (tipc_data_input(l
, skb
, inputq
))
1004 usr
= msg_user(hdr
);
1005 reasm_skb
= &l
->failover_reasm_skb
;
1008 if (usr
== MSG_BUNDLER
) {
1009 l
->stats
.recv_bundles
++;
1010 l
->stats
.recv_bundled
+= msg_msgcnt(hdr
);
1011 while (tipc_msg_extract(skb
, &iskb
, &pos
))
1012 tipc_data_input(l
, iskb
, inputq
);
1014 } else if (usr
== MSG_FRAGMENTER
) {
1015 l
->stats
.recv_fragments
++;
1016 if (tipc_buf_append(reasm_skb
, &skb
)) {
1017 l
->stats
.recv_fragmented
++;
1018 tipc_data_input(l
, skb
, inputq
);
1019 } else if (!*reasm_skb
) {
1020 return tipc_link_fsm_evt(l
, LINK_FAILURE_EVT
);
1023 } else if (usr
== BCAST_PROTOCOL
) {
1024 tipc_link_sync_rcv(node
, skb
);
1032 static bool tipc_link_release_pkts(struct tipc_link
*l
, u16 acked
)
1034 bool released
= false;
1035 struct sk_buff
*skb
, *tmp
;
1037 skb_queue_walk_safe(&l
->transmq
, skb
, tmp
) {
1038 if (more(buf_seqno(skb
), acked
))
1040 __skb_unlink(skb
, &l
->transmq
);
1047 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1048 * @link: the link that should handle the message
1050 * @xmitq: queue to place packets to be sent after this call
1052 int tipc_link_rcv(struct tipc_link
*l
, struct sk_buff
*skb
,
1053 struct sk_buff_head
*xmitq
)
1055 struct sk_buff_head
*arrvq
= &l
->deferdq
;
1056 struct sk_buff_head tmpq
;
1057 struct tipc_msg
*hdr
;
1061 __skb_queue_head_init(&tmpq
);
1063 if (unlikely(!__tipc_skb_queue_sorted(arrvq
, skb
))) {
1064 if (!(skb_queue_len(arrvq
) % TIPC_NACK_INTV
))
1065 tipc_link_build_proto_msg(l
, STATE_MSG
, 0,
1070 while ((skb
= skb_peek(arrvq
))) {
1073 /* Verify and update link state */
1074 if (unlikely(msg_user(hdr
) == LINK_PROTOCOL
)) {
1075 __skb_dequeue(arrvq
);
1076 rc
= tipc_link_proto_rcv(l
, skb
, xmitq
);
1080 if (unlikely(!link_is_up(l
))) {
1081 rc
= tipc_link_fsm_evt(l
, LINK_ESTABLISH_EVT
);
1082 if (!link_is_up(l
)) {
1083 kfree_skb(__skb_dequeue(arrvq
));
1088 l
->silent_intv_cnt
= 0;
1090 /* Forward queues and wake up waiting users */
1091 if (likely(tipc_link_release_pkts(l
, msg_ack(hdr
)))) {
1092 tipc_link_advance_backlog(l
, xmitq
);
1093 if (unlikely(!skb_queue_empty(&l
->wakeupq
)))
1094 link_prepare_wakeup(l
);
1097 /* Defer reception if there is a gap in the sequence */
1098 seqno
= msg_seqno(hdr
);
1099 rcv_nxt
= l
->rcv_nxt
;
1100 if (unlikely(less(rcv_nxt
, seqno
))) {
1101 l
->stats
.deferred_recv
++;
1105 __skb_dequeue(arrvq
);
1107 /* Drop if packet already received */
1108 if (unlikely(more(rcv_nxt
, seqno
))) {
1109 l
->stats
.duplicates
++;
1114 /* Packet can be delivered */
1116 l
->stats
.recv_info
++;
1117 if (unlikely(!tipc_data_input(l
, skb
, &tmpq
)))
1118 rc
= tipc_link_input(l
, skb
, &tmpq
);
1120 /* Ack at regular intervals */
1121 if (unlikely(++l
->rcv_unacked
>= TIPC_MIN_LINK_WIN
)) {
1123 l
->stats
.sent_acks
++;
1124 tipc_link_build_proto_msg(l
, STATE_MSG
,
1129 tipc_skb_queue_splice_tail(&tmpq
, l
->inputq
);
1134 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1136 * Returns increase in queue length (i.e. 0 or 1)
1138 u32
tipc_link_defer_pkt(struct sk_buff_head
*list
, struct sk_buff
*skb
)
1140 struct sk_buff
*skb1
;
1141 u16 seq_no
= buf_seqno(skb
);
1144 if (skb_queue_empty(list
)) {
1145 __skb_queue_tail(list
, skb
);
1150 if (less(buf_seqno(skb_peek_tail(list
)), seq_no
)) {
1151 __skb_queue_tail(list
, skb
);
1155 /* Locate insertion point in queue, then insert; discard if duplicate */
1156 skb_queue_walk(list
, skb1
) {
1157 u16 curr_seqno
= buf_seqno(skb1
);
1159 if (seq_no
== curr_seqno
) {
1164 if (less(seq_no
, curr_seqno
))
1168 __skb_queue_before(list
, skb1
, skb
);
1173 * Send protocol message to the other endpoint.
1175 void tipc_link_proto_xmit(struct tipc_link
*l
, u32 msg_typ
, int probe_msg
,
1176 u32 gap
, u32 tolerance
, u32 priority
)
1178 struct sk_buff
*skb
= NULL
;
1179 struct sk_buff_head xmitq
;
1181 __skb_queue_head_init(&xmitq
);
1182 tipc_link_build_proto_msg(l
, msg_typ
, probe_msg
, gap
,
1183 tolerance
, priority
, &xmitq
);
1184 skb
= __skb_dequeue(&xmitq
);
1187 tipc_bearer_send(l
->owner
->net
, l
->bearer_id
, skb
, l
->media_addr
);
1192 /* tipc_link_build_proto_msg: prepare link protocol message for transmission
1194 static void tipc_link_build_proto_msg(struct tipc_link
*l
, int mtyp
, bool probe
,
1195 u16 rcvgap
, int tolerance
, int priority
,
1196 struct sk_buff_head
*xmitq
)
1198 struct sk_buff
*skb
= NULL
;
1199 struct tipc_msg
*hdr
= l
->pmsg
;
1200 u16 snd_nxt
= l
->snd_nxt
;
1201 u16 rcv_nxt
= l
->rcv_nxt
;
1202 u16 rcv_last
= rcv_nxt
- 1;
1203 int node_up
= l
->owner
->bclink
.recv_permitted
;
1205 /* Don't send protocol message during reset or link failover */
1206 if (tipc_link_is_blocked(l
))
1209 msg_set_type(hdr
, mtyp
);
1210 msg_set_net_plane(hdr
, l
->net_plane
);
1211 msg_set_bcast_ack(hdr
, l
->owner
->bclink
.last_in
);
1212 msg_set_last_bcast(hdr
, tipc_bclink_get_last_sent(l
->owner
->net
));
1213 msg_set_link_tolerance(hdr
, tolerance
);
1214 msg_set_linkprio(hdr
, priority
);
1215 msg_set_redundant_link(hdr
, node_up
);
1216 msg_set_seq_gap(hdr
, 0);
1218 /* Compatibility: created msg must not be in sequence with pkt flow */
1219 msg_set_seqno(hdr
, snd_nxt
+ U16_MAX
/ 2);
1221 if (mtyp
== STATE_MSG
) {
1222 if (!tipc_link_is_up(l
))
1224 msg_set_next_sent(hdr
, snd_nxt
);
1226 /* Override rcvgap if there are packets in deferred queue */
1227 if (!skb_queue_empty(&l
->deferdq
))
1228 rcvgap
= buf_seqno(skb_peek(&l
->deferdq
)) - rcv_nxt
;
1230 msg_set_seq_gap(hdr
, rcvgap
);
1231 l
->stats
.sent_nacks
++;
1233 msg_set_ack(hdr
, rcv_last
);
1234 msg_set_probe(hdr
, probe
);
1236 l
->stats
.sent_probes
++;
1237 l
->stats
.sent_states
++;
1239 /* RESET_MSG or ACTIVATE_MSG */
1240 msg_set_max_pkt(hdr
, l
->advertised_mtu
);
1241 msg_set_ack(hdr
, l
->rcv_nxt
- 1);
1242 msg_set_next_sent(hdr
, 1);
1244 skb
= tipc_buf_acquire(msg_size(hdr
));
1247 skb_copy_to_linear_data(skb
, hdr
, msg_size(hdr
));
1248 skb
->priority
= TC_PRIO_CONTROL
;
1249 __skb_queue_tail(xmitq
, skb
);
1252 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1253 * with contents of the link's tranmsit and backlog queues.
1255 void tipc_link_tnl_prepare(struct tipc_link
*l
, struct tipc_link
*tnl
,
1256 int mtyp
, struct sk_buff_head
*xmitq
)
1258 struct sk_buff
*skb
, *tnlskb
;
1259 struct tipc_msg
*hdr
, tnlhdr
;
1260 struct sk_buff_head
*queue
= &l
->transmq
;
1261 struct sk_buff_head tmpxq
, tnlq
;
1262 u16 pktlen
, pktcnt
, seqno
= l
->snd_nxt
;
1267 skb_queue_head_init(&tnlq
);
1268 skb_queue_head_init(&tmpxq
);
1270 /* At least one packet required for safe algorithm => add dummy */
1271 skb
= tipc_msg_create(TIPC_LOW_IMPORTANCE
, TIPC_DIRECT_MSG
,
1272 BASIC_H_SIZE
, 0, l
->addr
, link_own_addr(l
),
1273 0, 0, TIPC_ERR_NO_PORT
);
1275 pr_warn("%sunable to create tunnel packet\n", link_co_err
);
1278 skb_queue_tail(&tnlq
, skb
);
1279 tipc_link_xmit(l
, &tnlq
, &tmpxq
);
1280 __skb_queue_purge(&tmpxq
);
1282 /* Initialize reusable tunnel packet header */
1283 tipc_msg_init(link_own_addr(l
), &tnlhdr
, TUNNEL_PROTOCOL
,
1284 mtyp
, INT_H_SIZE
, l
->addr
);
1285 pktcnt
= skb_queue_len(&l
->transmq
) + skb_queue_len(&l
->backlogq
);
1286 msg_set_msgcnt(&tnlhdr
, pktcnt
);
1287 msg_set_bearer_id(&tnlhdr
, l
->peer_bearer_id
);
1289 /* Wrap each packet into a tunnel packet */
1290 skb_queue_walk(queue
, skb
) {
1292 if (queue
== &l
->backlogq
)
1293 msg_set_seqno(hdr
, seqno
++);
1294 pktlen
= msg_size(hdr
);
1295 msg_set_size(&tnlhdr
, pktlen
+ INT_H_SIZE
);
1296 tnlskb
= tipc_buf_acquire(pktlen
+ INT_H_SIZE
);
1298 pr_warn("%sunable to send packet\n", link_co_err
);
1301 skb_copy_to_linear_data(tnlskb
, &tnlhdr
, INT_H_SIZE
);
1302 skb_copy_to_linear_data_offset(tnlskb
, INT_H_SIZE
, hdr
, pktlen
);
1303 __skb_queue_tail(&tnlq
, tnlskb
);
1305 if (queue
!= &l
->backlogq
) {
1306 queue
= &l
->backlogq
;
1310 tipc_link_xmit(tnl
, &tnlq
, xmitq
);
1312 if (mtyp
== FAILOVER_MSG
) {
1313 tnl
->drop_point
= l
->rcv_nxt
;
1314 tnl
->failover_reasm_skb
= l
->reasm_buf
;
1315 l
->reasm_buf
= NULL
;
1319 /* tipc_link_proto_rcv(): receive link level protocol message :
1320 * Note that network plane id propagates through the network, and may
1321 * change at any time. The node with lowest numerical id determines
1324 static int tipc_link_proto_rcv(struct tipc_link
*l
, struct sk_buff
*skb
,
1325 struct sk_buff_head
*xmitq
)
1327 struct tipc_msg
*hdr
= buf_msg(skb
);
1329 u16 nacked_gap
= msg_seq_gap(hdr
);
1330 u16 peers_snd_nxt
= msg_next_sent(hdr
);
1331 u16 peers_tol
= msg_link_tolerance(hdr
);
1332 u16 peers_prio
= msg_linkprio(hdr
);
1333 u16 rcv_nxt
= l
->rcv_nxt
;
1337 if (tipc_link_is_blocked(l
))
1340 if (link_own_addr(l
) > msg_prevnode(hdr
))
1341 l
->net_plane
= msg_net_plane(hdr
);
1343 switch (msg_type(hdr
)) {
1346 /* Ignore duplicate RESET with old session number */
1347 if ((less_eq(msg_session(hdr
), l
->peer_session
)) &&
1348 (l
->peer_session
!= WILDCARD_SESSION
))
1354 /* Complete own link name with peer's interface name */
1355 if_name
= strrchr(l
->name
, ':') + 1;
1356 if (sizeof(l
->name
) - (if_name
- l
->name
) <= TIPC_MAX_IF_NAME
)
1358 if (msg_data_sz(hdr
) < TIPC_MAX_IF_NAME
)
1360 strncpy(if_name
, msg_data(hdr
), TIPC_MAX_IF_NAME
);
1362 /* Update own tolerance if peer indicates a non-zero value */
1363 if (in_range(peers_tol
, TIPC_MIN_LINK_TOL
, TIPC_MAX_LINK_TOL
))
1364 l
->tolerance
= peers_tol
;
1366 /* Update own priority if peer's priority is higher */
1367 if (in_range(peers_prio
, l
->priority
+ 1, TIPC_MAX_LINK_PRI
))
1368 l
->priority
= peers_prio
;
1370 if (msg_type(hdr
) == RESET_MSG
) {
1371 rc
|= tipc_link_fsm_evt(l
, LINK_PEER_RESET_EVT
);
1372 } else if (!link_is_up(l
)) {
1373 tipc_link_fsm_evt(l
, LINK_PEER_RESET_EVT
);
1374 rc
|= tipc_link_fsm_evt(l
, LINK_ESTABLISH_EVT
);
1376 l
->peer_session
= msg_session(hdr
);
1377 l
->peer_bearer_id
= msg_bearer_id(hdr
);
1378 if (l
->mtu
> msg_max_pkt(hdr
))
1379 l
->mtu
= msg_max_pkt(hdr
);
1384 /* Update own tolerance if peer indicates a non-zero value */
1385 if (in_range(peers_tol
, TIPC_MIN_LINK_TOL
, TIPC_MAX_LINK_TOL
))
1386 l
->tolerance
= peers_tol
;
1388 l
->silent_intv_cnt
= 0;
1389 l
->stats
.recv_states
++;
1391 l
->stats
.recv_probes
++;
1392 rc
= tipc_link_fsm_evt(l
, LINK_ESTABLISH_EVT
);
1396 /* Send NACK if peer has sent pkts we haven't received yet */
1397 if (more(peers_snd_nxt
, rcv_nxt
) && !tipc_link_is_synching(l
))
1398 rcvgap
= peers_snd_nxt
- l
->rcv_nxt
;
1399 if (rcvgap
|| (msg_probe(hdr
)))
1400 tipc_link_build_proto_msg(l
, STATE_MSG
, 0, rcvgap
,
1402 tipc_link_release_pkts(l
, msg_ack(hdr
));
1404 /* If NACK, retransmit will now start at right position */
1406 rc
= tipc_link_retransm(l
, nacked_gap
, xmitq
);
1407 l
->stats
.recv_nacks
++;
1410 tipc_link_advance_backlog(l
, xmitq
);
1411 if (unlikely(!skb_queue_empty(&l
->wakeupq
)))
1412 link_prepare_wakeup(l
);
1419 void tipc_link_set_queue_limits(struct tipc_link
*l
, u32 win
)
1421 int max_bulk
= TIPC_MAX_PUBLICATIONS
/ (l
->mtu
/ ITEM_SIZE
);
1424 l
->backlog
[TIPC_LOW_IMPORTANCE
].limit
= win
/ 2;
1425 l
->backlog
[TIPC_MEDIUM_IMPORTANCE
].limit
= win
;
1426 l
->backlog
[TIPC_HIGH_IMPORTANCE
].limit
= win
/ 2 * 3;
1427 l
->backlog
[TIPC_CRITICAL_IMPORTANCE
].limit
= win
* 2;
1428 l
->backlog
[TIPC_SYSTEM_IMPORTANCE
].limit
= max_bulk
;
1431 /* tipc_link_find_owner - locate owner node of link by link's name
1432 * @net: the applicable net namespace
1433 * @name: pointer to link name string
1434 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1436 * Returns pointer to node owning the link, or 0 if no matching link is found.
1438 static struct tipc_node
*tipc_link_find_owner(struct net
*net
,
1439 const char *link_name
,
1440 unsigned int *bearer_id
)
1442 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1443 struct tipc_link
*l_ptr
;
1444 struct tipc_node
*n_ptr
;
1445 struct tipc_node
*found_node
= NULL
;
1450 list_for_each_entry_rcu(n_ptr
, &tn
->node_list
, list
) {
1451 tipc_node_lock(n_ptr
);
1452 for (i
= 0; i
< MAX_BEARERS
; i
++) {
1453 l_ptr
= n_ptr
->links
[i
].link
;
1454 if (l_ptr
&& !strcmp(l_ptr
->name
, link_name
)) {
1460 tipc_node_unlock(n_ptr
);
1470 * link_reset_statistics - reset link statistics
1471 * @l_ptr: pointer to link
1473 static void link_reset_statistics(struct tipc_link
*l_ptr
)
1475 memset(&l_ptr
->stats
, 0, sizeof(l_ptr
->stats
));
1476 l_ptr
->stats
.sent_info
= l_ptr
->snd_nxt
;
1477 l_ptr
->stats
.recv_info
= l_ptr
->rcv_nxt
;
1480 static void link_print(struct tipc_link
*l
, const char *str
)
1482 struct sk_buff
*hskb
= skb_peek(&l
->transmq
);
1483 u16 head
= hskb
? msg_seqno(buf_msg(hskb
)) : l
->snd_nxt
;
1484 u16 tail
= l
->snd_nxt
- 1;
1486 pr_info("%s Link <%s> state %x\n", str
, l
->name
, l
->state
);
1487 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1488 skb_queue_len(&l
->transmq
), head
, tail
,
1489 skb_queue_len(&l
->backlogq
), l
->snd_nxt
, l
->rcv_nxt
);
1492 /* Parse and validate nested (link) properties valid for media, bearer and link
1494 int tipc_nl_parse_link_prop(struct nlattr
*prop
, struct nlattr
*props
[])
1498 err
= nla_parse_nested(props
, TIPC_NLA_PROP_MAX
, prop
,
1499 tipc_nl_prop_policy
);
1503 if (props
[TIPC_NLA_PROP_PRIO
]) {
1506 prio
= nla_get_u32(props
[TIPC_NLA_PROP_PRIO
]);
1507 if (prio
> TIPC_MAX_LINK_PRI
)
1511 if (props
[TIPC_NLA_PROP_TOL
]) {
1514 tol
= nla_get_u32(props
[TIPC_NLA_PROP_TOL
]);
1515 if ((tol
< TIPC_MIN_LINK_TOL
) || (tol
> TIPC_MAX_LINK_TOL
))
1519 if (props
[TIPC_NLA_PROP_WIN
]) {
1522 win
= nla_get_u32(props
[TIPC_NLA_PROP_WIN
]);
1523 if ((win
< TIPC_MIN_LINK_WIN
) || (win
> TIPC_MAX_LINK_WIN
))
1530 int tipc_nl_link_set(struct sk_buff
*skb
, struct genl_info
*info
)
1536 struct tipc_link
*link
;
1537 struct tipc_node
*node
;
1538 struct nlattr
*attrs
[TIPC_NLA_LINK_MAX
+ 1];
1539 struct net
*net
= sock_net(skb
->sk
);
1541 if (!info
->attrs
[TIPC_NLA_LINK
])
1544 err
= nla_parse_nested(attrs
, TIPC_NLA_LINK_MAX
,
1545 info
->attrs
[TIPC_NLA_LINK
],
1546 tipc_nl_link_policy
);
1550 if (!attrs
[TIPC_NLA_LINK_NAME
])
1553 name
= nla_data(attrs
[TIPC_NLA_LINK_NAME
]);
1555 if (strcmp(name
, tipc_bclink_name
) == 0)
1556 return tipc_nl_bc_link_set(net
, attrs
);
1558 node
= tipc_link_find_owner(net
, name
, &bearer_id
);
1562 tipc_node_lock(node
);
1564 link
= node
->links
[bearer_id
].link
;
1570 if (attrs
[TIPC_NLA_LINK_PROP
]) {
1571 struct nlattr
*props
[TIPC_NLA_PROP_MAX
+ 1];
1573 err
= tipc_nl_parse_link_prop(attrs
[TIPC_NLA_LINK_PROP
],
1580 if (props
[TIPC_NLA_PROP_TOL
]) {
1583 tol
= nla_get_u32(props
[TIPC_NLA_PROP_TOL
]);
1584 link
->tolerance
= tol
;
1585 tipc_link_proto_xmit(link
, STATE_MSG
, 0, 0, tol
, 0);
1587 if (props
[TIPC_NLA_PROP_PRIO
]) {
1590 prio
= nla_get_u32(props
[TIPC_NLA_PROP_PRIO
]);
1591 link
->priority
= prio
;
1592 tipc_link_proto_xmit(link
, STATE_MSG
, 0, 0, 0, prio
);
1594 if (props
[TIPC_NLA_PROP_WIN
]) {
1597 win
= nla_get_u32(props
[TIPC_NLA_PROP_WIN
]);
1598 tipc_link_set_queue_limits(link
, win
);
1603 tipc_node_unlock(node
);
1608 static int __tipc_nl_add_stats(struct sk_buff
*skb
, struct tipc_stats
*s
)
1611 struct nlattr
*stats
;
1618 struct nla_map map
[] = {
1619 {TIPC_NLA_STATS_RX_INFO
, s
->recv_info
},
1620 {TIPC_NLA_STATS_RX_FRAGMENTS
, s
->recv_fragments
},
1621 {TIPC_NLA_STATS_RX_FRAGMENTED
, s
->recv_fragmented
},
1622 {TIPC_NLA_STATS_RX_BUNDLES
, s
->recv_bundles
},
1623 {TIPC_NLA_STATS_RX_BUNDLED
, s
->recv_bundled
},
1624 {TIPC_NLA_STATS_TX_INFO
, s
->sent_info
},
1625 {TIPC_NLA_STATS_TX_FRAGMENTS
, s
->sent_fragments
},
1626 {TIPC_NLA_STATS_TX_FRAGMENTED
, s
->sent_fragmented
},
1627 {TIPC_NLA_STATS_TX_BUNDLES
, s
->sent_bundles
},
1628 {TIPC_NLA_STATS_TX_BUNDLED
, s
->sent_bundled
},
1629 {TIPC_NLA_STATS_MSG_PROF_TOT
, (s
->msg_length_counts
) ?
1630 s
->msg_length_counts
: 1},
1631 {TIPC_NLA_STATS_MSG_LEN_CNT
, s
->msg_length_counts
},
1632 {TIPC_NLA_STATS_MSG_LEN_TOT
, s
->msg_lengths_total
},
1633 {TIPC_NLA_STATS_MSG_LEN_P0
, s
->msg_length_profile
[0]},
1634 {TIPC_NLA_STATS_MSG_LEN_P1
, s
->msg_length_profile
[1]},
1635 {TIPC_NLA_STATS_MSG_LEN_P2
, s
->msg_length_profile
[2]},
1636 {TIPC_NLA_STATS_MSG_LEN_P3
, s
->msg_length_profile
[3]},
1637 {TIPC_NLA_STATS_MSG_LEN_P4
, s
->msg_length_profile
[4]},
1638 {TIPC_NLA_STATS_MSG_LEN_P5
, s
->msg_length_profile
[5]},
1639 {TIPC_NLA_STATS_MSG_LEN_P6
, s
->msg_length_profile
[6]},
1640 {TIPC_NLA_STATS_RX_STATES
, s
->recv_states
},
1641 {TIPC_NLA_STATS_RX_PROBES
, s
->recv_probes
},
1642 {TIPC_NLA_STATS_RX_NACKS
, s
->recv_nacks
},
1643 {TIPC_NLA_STATS_RX_DEFERRED
, s
->deferred_recv
},
1644 {TIPC_NLA_STATS_TX_STATES
, s
->sent_states
},
1645 {TIPC_NLA_STATS_TX_PROBES
, s
->sent_probes
},
1646 {TIPC_NLA_STATS_TX_NACKS
, s
->sent_nacks
},
1647 {TIPC_NLA_STATS_TX_ACKS
, s
->sent_acks
},
1648 {TIPC_NLA_STATS_RETRANSMITTED
, s
->retransmitted
},
1649 {TIPC_NLA_STATS_DUPLICATES
, s
->duplicates
},
1650 {TIPC_NLA_STATS_LINK_CONGS
, s
->link_congs
},
1651 {TIPC_NLA_STATS_MAX_QUEUE
, s
->max_queue_sz
},
1652 {TIPC_NLA_STATS_AVG_QUEUE
, s
->queue_sz_counts
?
1653 (s
->accu_queue_sz
/ s
->queue_sz_counts
) : 0}
1656 stats
= nla_nest_start(skb
, TIPC_NLA_LINK_STATS
);
1660 for (i
= 0; i
< ARRAY_SIZE(map
); i
++)
1661 if (nla_put_u32(skb
, map
[i
].key
, map
[i
].val
))
1664 nla_nest_end(skb
, stats
);
1668 nla_nest_cancel(skb
, stats
);
1673 /* Caller should hold appropriate locks to protect the link */
1674 static int __tipc_nl_add_link(struct net
*net
, struct tipc_nl_msg
*msg
,
1675 struct tipc_link
*link
, int nlflags
)
1679 struct nlattr
*attrs
;
1680 struct nlattr
*prop
;
1681 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1683 hdr
= genlmsg_put(msg
->skb
, msg
->portid
, msg
->seq
, &tipc_genl_family
,
1684 nlflags
, TIPC_NL_LINK_GET
);
1688 attrs
= nla_nest_start(msg
->skb
, TIPC_NLA_LINK
);
1692 if (nla_put_string(msg
->skb
, TIPC_NLA_LINK_NAME
, link
->name
))
1694 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_DEST
,
1695 tipc_cluster_mask(tn
->own_addr
)))
1697 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_MTU
, link
->mtu
))
1699 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_RX
, link
->rcv_nxt
))
1701 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_TX
, link
->snd_nxt
))
1704 if (tipc_link_is_up(link
))
1705 if (nla_put_flag(msg
->skb
, TIPC_NLA_LINK_UP
))
1707 if (tipc_link_is_active(link
))
1708 if (nla_put_flag(msg
->skb
, TIPC_NLA_LINK_ACTIVE
))
1711 prop
= nla_nest_start(msg
->skb
, TIPC_NLA_LINK_PROP
);
1714 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_PRIO
, link
->priority
))
1716 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_TOL
, link
->tolerance
))
1718 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_WIN
,
1721 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_PRIO
, link
->priority
))
1723 nla_nest_end(msg
->skb
, prop
);
1725 err
= __tipc_nl_add_stats(msg
->skb
, &link
->stats
);
1729 nla_nest_end(msg
->skb
, attrs
);
1730 genlmsg_end(msg
->skb
, hdr
);
1735 nla_nest_cancel(msg
->skb
, prop
);
1737 nla_nest_cancel(msg
->skb
, attrs
);
1739 genlmsg_cancel(msg
->skb
, hdr
);
1744 /* Caller should hold node lock */
1745 static int __tipc_nl_add_node_links(struct net
*net
, struct tipc_nl_msg
*msg
,
1746 struct tipc_node
*node
, u32
*prev_link
)
1751 for (i
= *prev_link
; i
< MAX_BEARERS
; i
++) {
1754 if (!node
->links
[i
].link
)
1757 err
= __tipc_nl_add_link(net
, msg
,
1758 node
->links
[i
].link
, NLM_F_MULTI
);
1767 int tipc_nl_link_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1769 struct net
*net
= sock_net(skb
->sk
);
1770 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1771 struct tipc_node
*node
;
1772 struct tipc_nl_msg msg
;
1773 u32 prev_node
= cb
->args
[0];
1774 u32 prev_link
= cb
->args
[1];
1775 int done
= cb
->args
[2];
1782 msg
.portid
= NETLINK_CB(cb
->skb
).portid
;
1783 msg
.seq
= cb
->nlh
->nlmsg_seq
;
1787 node
= tipc_node_find(net
, prev_node
);
1789 /* We never set seq or call nl_dump_check_consistent()
1790 * this means that setting prev_seq here will cause the
1791 * consistence check to fail in the netlink callback
1792 * handler. Resulting in the last NLMSG_DONE message
1793 * having the NLM_F_DUMP_INTR flag set.
1798 tipc_node_put(node
);
1800 list_for_each_entry_continue_rcu(node
, &tn
->node_list
,
1802 tipc_node_lock(node
);
1803 err
= __tipc_nl_add_node_links(net
, &msg
, node
,
1805 tipc_node_unlock(node
);
1809 prev_node
= node
->addr
;
1812 err
= tipc_nl_add_bc_link(net
, &msg
);
1816 list_for_each_entry_rcu(node
, &tn
->node_list
, list
) {
1817 tipc_node_lock(node
);
1818 err
= __tipc_nl_add_node_links(net
, &msg
, node
,
1820 tipc_node_unlock(node
);
1824 prev_node
= node
->addr
;
1831 cb
->args
[0] = prev_node
;
1832 cb
->args
[1] = prev_link
;
1838 int tipc_nl_link_get(struct sk_buff
*skb
, struct genl_info
*info
)
1840 struct net
*net
= genl_info_net(info
);
1841 struct tipc_nl_msg msg
;
1845 msg
.portid
= info
->snd_portid
;
1846 msg
.seq
= info
->snd_seq
;
1848 if (!info
->attrs
[TIPC_NLA_LINK_NAME
])
1850 name
= nla_data(info
->attrs
[TIPC_NLA_LINK_NAME
]);
1852 msg
.skb
= nlmsg_new(NLMSG_GOODSIZE
, GFP_KERNEL
);
1856 if (strcmp(name
, tipc_bclink_name
) == 0) {
1857 err
= tipc_nl_add_bc_link(net
, &msg
);
1859 nlmsg_free(msg
.skb
);
1864 struct tipc_node
*node
;
1865 struct tipc_link
*link
;
1867 node
= tipc_link_find_owner(net
, name
, &bearer_id
);
1871 tipc_node_lock(node
);
1872 link
= node
->links
[bearer_id
].link
;
1874 tipc_node_unlock(node
);
1875 nlmsg_free(msg
.skb
);
1879 err
= __tipc_nl_add_link(net
, &msg
, link
, 0);
1880 tipc_node_unlock(node
);
1882 nlmsg_free(msg
.skb
);
1887 return genlmsg_reply(msg
.skb
, info
);
1890 int tipc_nl_link_reset_stats(struct sk_buff
*skb
, struct genl_info
*info
)
1894 unsigned int bearer_id
;
1895 struct tipc_link
*link
;
1896 struct tipc_node
*node
;
1897 struct nlattr
*attrs
[TIPC_NLA_LINK_MAX
+ 1];
1898 struct net
*net
= sock_net(skb
->sk
);
1900 if (!info
->attrs
[TIPC_NLA_LINK
])
1903 err
= nla_parse_nested(attrs
, TIPC_NLA_LINK_MAX
,
1904 info
->attrs
[TIPC_NLA_LINK
],
1905 tipc_nl_link_policy
);
1909 if (!attrs
[TIPC_NLA_LINK_NAME
])
1912 link_name
= nla_data(attrs
[TIPC_NLA_LINK_NAME
]);
1914 if (strcmp(link_name
, tipc_bclink_name
) == 0) {
1915 err
= tipc_bclink_reset_stats(net
);
1921 node
= tipc_link_find_owner(net
, link_name
, &bearer_id
);
1925 tipc_node_lock(node
);
1927 link
= node
->links
[bearer_id
].link
;
1929 tipc_node_unlock(node
);
1933 link_reset_statistics(link
);
1935 tipc_node_unlock(node
);