/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "name_distr.h"
#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME]		= {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]	= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]	= { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};
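
/* Note: the state values above use disjoint bit patterns rather than a plain
 * 0..6 range, so a link's state can be tested against several states at once
 * with a single mask, e.g. "l->state & (LINK_ESTABLISHED | LINK_SYNCHING)"
 * as done by link_is_up() below.
 */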

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	l_ptr->tolerance = b_ptr->tolerance;
	l_ptr->state = LINK_RESET;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	return l_ptr;
}

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			rc |= TIPC_LINK_UP_EVT;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_FAILURE_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
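
/* Illustrative note (reconstruction, not part of the original text): the FSM
 * only changes l->state and accumulates TIPC_LINK_UP_EVT/TIPC_LINK_DOWN_EVT
 * flags in its return value; acting on those flags is left to the caller,
 * e.g.:
 *
 *	rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *	if (rc & TIPC_LINK_DOWN_EVT)
 *		... caller takes the link down ...
 */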

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
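
/* The profile above buckets sent message sizes into seven ranges; assuming
 * the first threshold is 64 bytes (reconstructed), the buckets are:
 *   [0] <= 64, [1] <= 256, [2] <= 1024, [3] <= 4096,
 *   [4] <= 16384, [5] <= 32768, [6] larger than 32768 bytes.
 */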

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;
	int mtyp = STATE_MSG;
	bool xmit = false;
	bool prb = false;

	link_profile_stats(l);

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		if (!l->silent_intv_cnt) {
			if (tipc_bclink_acks_missing(l->owner))
				xmit = true;
		} else if (l->silent_intv_cnt <= l->abort_limit) {
			xmit = true;
			prb = true;
		} else {
			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		xmit = true;
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		xmit = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (xmit)
		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

	return rc;
}
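
/* Probing sketch: while the link is up, each timeout that finds the link has
 * been silent since the previous one bumps silent_intv_cnt and emits a
 * probing STATE_MSG; once silent_intv_cnt exceeds abort_limit the FSM is fed
 * LINK_FAILURE_EVT, which takes the link down via tipc_link_fsm_evt().
 */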

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen... */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
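
/* Congestion handshake, roughly: a sender that hits a full backlog level gets
 * -ELINKCONG back (see the xmit functions below) and a SOCK_WAKEUP pseudo
 * message queued on link->wakeupq on its behalf; link_prepare_wakeup() later
 * moves those pseudo messages to the input queue when backlog space permits,
 * which is what wakes the blocked sender up again.
 */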

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l)
{
	tipc_link_fsm_evt(l, LINK_RESET_EVT);

	/* Link is down, accept any session */
	l->peer_session = WILDCARD_SESSION;

	/* If peer is up, it only accepts an incremented session number */
	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

	/* Prepare for renewed mtu size negotiation */
	l->mtu = l->advertised_mtu;

	/* Clean up all queues: */
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);

	tipc_link_purge_backlog(l);
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->silent_intv_cnt = 0;
	l->stats.recv_info = 0;
	link_reset_statistics(l);
}
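
/* Session handling on reset, as implied above: our own session number in the
 * pre-built protocol message is incremented so the peer can tell a new link
 * instance from a stale one, while peer_session is set back to the wildcard
 * value so that any session number is accepted until the peer identifies
 * itself again.
 */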

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
		break;
	}
	link->snd_nxt = seqno;
	return 0;
}
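
/* Note the difference from tipc_link_xmit() below: this variant hands packets
 * straight to the bearer with tipc_bearer_send(), whereas tipc_link_xmit()
 * only queues clones onto the caller-supplied xmitq and leaves the actual
 * sending to the caller.
 */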

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
		break;
	}
	l->snd_nxt = seqno;
	return 0;
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/**
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}
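
/* As with tipc_link_retransmit() above, more than 100 consecutive retransmit
 * attempts of the same packet (stale_count) are treated as a retransmission
 * failure; here that additionally feeds LINK_FAILURE_EVT into the FSM instead
 * of only logging the failure.
 */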

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct tipc_node *node = link->owner;

	switch (msg_user(buf_msg(skb))) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		__skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		skb_queue_tail(link->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_node *node = l->owner;
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb) {
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_link_sync_rcv(node, skb);
		return 0;
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff_head tmpq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	__skb_queue_head_init(&tmpq);

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	while ((skb = skb_peek(arrvq))) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc = tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_is_up(l))) {
			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
			if (!link_is_up(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				goto exit;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			goto exit;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			goto exit;
		}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
			rc = tipc_link_input(l, skb, &tmpq);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
exit:
	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
	return rc;
}
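
/* Receive-path summary (reconstruction): arriving packets are first sorted
 * into the deferred queue, link protocol messages are handled separately,
 * in-sequence packets are delivered via tipc_data_input()/tipc_link_input(),
 * out-of-order arrivals bump deferred_recv and may trigger a NACK every
 * TIPC_NACK_INTV deferred packets, and an acknowledging STATE_MSG is emitted
 * once rcv_unacked reaches TIPC_MIN_LINK_WIN.
 */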

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
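
/* The protocol message reuses the link's pre-built header (l->pmsg) and is
 * deliberately numbered far outside the data packet flow (snd_nxt +
 * U16_MAX / 2), so the peer cannot mistake it for an in-sequence data packet.
 */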

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
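
/* The dummy TIPC_ERR_NO_PORT message created above guarantees that at least
 * one tunnel packet is always sent, so the peer's failover/synch logic gets a
 * packet carrying the message count even when both the transmit and backlog
 * queues are empty.
 */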

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l))
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */
	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		if (msg_type(hdr) == RESET_MSG) {
			rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
		} else if (!link_is_up(l)) {
			tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
			rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
		}
		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;
	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
		if (!link_is_up(l))
			break;

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc = tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
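
/* Worked example of the limits above: with a window of 50 packets the backlog
 * limits become 25 (low), 50 (medium), 75 (high) and 100 (critical)
 * importance, while system importance is bounded by max_bulk, i.e. by how
 * many name table items fit into bulk distribution messages at the current
 * MTU.
 */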

/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or 0 if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node)
			return -EINVAL;

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}