2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
42 #include "name_distr.h"
46 #include <linux/pkt_sched.h>
49 * Error message prefixes
/* Shared prefixes for link event log messages */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
55 static const struct nla_policy tipc_nl_link_policy
[TIPC_NLA_LINK_MAX
+ 1] = {
56 [TIPC_NLA_LINK_UNSPEC
] = { .type
= NLA_UNSPEC
},
57 [TIPC_NLA_LINK_NAME
] = {
59 .len
= TIPC_MAX_LINK_NAME
61 [TIPC_NLA_LINK_MTU
] = { .type
= NLA_U32
},
62 [TIPC_NLA_LINK_BROADCAST
] = { .type
= NLA_FLAG
},
63 [TIPC_NLA_LINK_UP
] = { .type
= NLA_FLAG
},
64 [TIPC_NLA_LINK_ACTIVE
] = { .type
= NLA_FLAG
},
65 [TIPC_NLA_LINK_PROP
] = { .type
= NLA_NESTED
},
66 [TIPC_NLA_LINK_STATS
] = { .type
= NLA_NESTED
},
67 [TIPC_NLA_LINK_RX
] = { .type
= NLA_U32
},
68 [TIPC_NLA_LINK_TX
] = { .type
= NLA_U32
}
71 /* Properties valid for media, bearer and link */
72 static const struct nla_policy tipc_nl_prop_policy
[TIPC_NLA_PROP_MAX
+ 1] = {
73 [TIPC_NLA_PROP_UNSPEC
] = { .type
= NLA_UNSPEC
},
74 [TIPC_NLA_PROP_PRIO
] = { .type
= NLA_U32
},
75 [TIPC_NLA_PROP_TOL
] = { .type
= NLA_U32
},
76 [TIPC_NLA_PROP_WIN
] = { .type
= NLA_U32
}
80 * Out-of-range value for link session numbers
82 #define INVALID_SESSION 0x10000
87 #define STARTING_EVT 856384768 /* link processing trigger */
88 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
89 #define TIMEOUT_EVT 560817u /* link timer expired */
92 * The following two 'message types' is really just implementation
93 * data conveniently stored in the message header.
94 * They must not be considered part of the protocol
100 * State value stored in 'exp_msg_count'
102 #define START_CHANGEOVER 100000u
104 static void link_handle_out_of_seq_msg(struct tipc_link
*link
,
105 struct sk_buff
*skb
);
106 static void tipc_link_proto_rcv(struct tipc_link
*link
,
107 struct sk_buff
*skb
);
108 static int tipc_link_tunnel_rcv(struct tipc_node
*node
,
109 struct sk_buff
**skb
);
110 static void link_set_supervision_props(struct tipc_link
*l_ptr
, u32 tol
);
111 static void link_state_event(struct tipc_link
*l_ptr
, u32 event
);
112 static void link_reset_statistics(struct tipc_link
*l_ptr
);
113 static void link_print(struct tipc_link
*l_ptr
, const char *str
);
114 static void tipc_link_sync_xmit(struct tipc_link
*l
);
115 static void tipc_link_sync_rcv(struct tipc_node
*n
, struct sk_buff
*buf
);
116 static void tipc_link_input(struct tipc_link
*l
, struct sk_buff
*skb
);
117 static bool tipc_data_input(struct tipc_link
*l
, struct sk_buff
*skb
);
120 * Simple link routines
/* Round up to the next multiple of 4 (TIPC word alignment) */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
127 static void tipc_link_release(struct kref
*kref
)
129 kfree(container_of(kref
, struct tipc_link
, ref
));
132 static void tipc_link_get(struct tipc_link
*l_ptr
)
134 kref_get(&l_ptr
->ref
);
137 static void tipc_link_put(struct tipc_link
*l_ptr
)
139 kref_put(&l_ptr
->ref
, tipc_link_release
);
142 static void link_init_max_pkt(struct tipc_link
*l_ptr
)
144 struct tipc_node
*node
= l_ptr
->owner
;
145 struct tipc_net
*tn
= net_generic(node
->net
, tipc_net_id
);
146 struct tipc_bearer
*b_ptr
;
150 b_ptr
= rcu_dereference_rtnl(tn
->bearer_list
[l_ptr
->bearer_id
]);
155 max_pkt
= (b_ptr
->mtu
& ~3);
158 if (max_pkt
> MAX_MSG_SIZE
)
159 max_pkt
= MAX_MSG_SIZE
;
161 l_ptr
->max_pkt_target
= max_pkt
;
162 if (l_ptr
->max_pkt_target
< MAX_PKT_DEFAULT
)
163 l_ptr
->max_pkt
= l_ptr
->max_pkt_target
;
165 l_ptr
->max_pkt
= MAX_PKT_DEFAULT
;
167 l_ptr
->max_pkt_probes
= 0;
171 * Simple non-static link routines (i.e. referenced outside this file)
/* Return non-zero if the link is in a working (up) state.
 * Tolerates a NULL link pointer, which callers may pass for absent links.
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
180 int tipc_link_is_active(struct tipc_link
*l_ptr
)
182 return (l_ptr
->owner
->active_links
[0] == l_ptr
) ||
183 (l_ptr
->owner
->active_links
[1] == l_ptr
);
187 * link_timeout - handle expiration of link timer
188 * @l_ptr: pointer to link
190 static void link_timeout(unsigned long data
)
192 struct tipc_link
*l_ptr
= (struct tipc_link
*)data
;
195 tipc_node_lock(l_ptr
->owner
);
197 /* update counters used in statistical profiling of send traffic */
198 l_ptr
->stats
.accu_queue_sz
+= skb_queue_len(&l_ptr
->transmq
);
199 l_ptr
->stats
.queue_sz_counts
++;
201 skb
= skb_peek(&l_ptr
->transmq
);
203 struct tipc_msg
*msg
= buf_msg(skb
);
204 u32 length
= msg_size(msg
);
206 if ((msg_user(msg
) == MSG_FRAGMENTER
) &&
207 (msg_type(msg
) == FIRST_FRAGMENT
)) {
208 length
= msg_size(msg_get_wrapped(msg
));
211 l_ptr
->stats
.msg_lengths_total
+= length
;
212 l_ptr
->stats
.msg_length_counts
++;
214 l_ptr
->stats
.msg_length_profile
[0]++;
215 else if (length
<= 256)
216 l_ptr
->stats
.msg_length_profile
[1]++;
217 else if (length
<= 1024)
218 l_ptr
->stats
.msg_length_profile
[2]++;
219 else if (length
<= 4096)
220 l_ptr
->stats
.msg_length_profile
[3]++;
221 else if (length
<= 16384)
222 l_ptr
->stats
.msg_length_profile
[4]++;
223 else if (length
<= 32768)
224 l_ptr
->stats
.msg_length_profile
[5]++;
226 l_ptr
->stats
.msg_length_profile
[6]++;
230 /* do all other link processing performed on a periodic basis */
231 link_state_event(l_ptr
, TIMEOUT_EVT
);
233 if (skb_queue_len(&l_ptr
->backlogq
))
234 tipc_link_push_packets(l_ptr
);
236 tipc_node_unlock(l_ptr
->owner
);
237 tipc_link_put(l_ptr
);
240 static void link_set_timer(struct tipc_link
*link
, unsigned long time
)
242 if (!mod_timer(&link
->timer
, jiffies
+ time
))
247 * tipc_link_create - create a new link
248 * @n_ptr: pointer to associated node
249 * @b_ptr: pointer to associated bearer
250 * @media_addr: media address to use when sending messages over link
252 * Returns pointer to link.
254 struct tipc_link
*tipc_link_create(struct tipc_node
*n_ptr
,
255 struct tipc_bearer
*b_ptr
,
256 const struct tipc_media_addr
*media_addr
)
258 struct tipc_net
*tn
= net_generic(n_ptr
->net
, tipc_net_id
);
259 struct tipc_link
*l_ptr
;
260 struct tipc_msg
*msg
;
262 char addr_string
[16];
263 u32 peer
= n_ptr
->addr
;
265 if (n_ptr
->link_cnt
>= MAX_BEARERS
) {
266 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
267 pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
268 n_ptr
->link_cnt
, addr_string
, MAX_BEARERS
);
272 if (n_ptr
->links
[b_ptr
->identity
]) {
273 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
274 pr_err("Attempt to establish second link on <%s> to %s\n",
275 b_ptr
->name
, addr_string
);
279 l_ptr
= kzalloc(sizeof(*l_ptr
), GFP_ATOMIC
);
281 pr_warn("Link creation failed, no memory\n");
284 kref_init(&l_ptr
->ref
);
286 if_name
= strchr(b_ptr
->name
, ':') + 1;
287 sprintf(l_ptr
->name
, "%u.%u.%u:%s-%u.%u.%u:unknown",
288 tipc_zone(tn
->own_addr
), tipc_cluster(tn
->own_addr
),
289 tipc_node(tn
->own_addr
),
291 tipc_zone(peer
), tipc_cluster(peer
), tipc_node(peer
));
292 /* note: peer i/f name is updated by reset/activate message */
293 memcpy(&l_ptr
->media_addr
, media_addr
, sizeof(*media_addr
));
294 l_ptr
->owner
= n_ptr
;
295 l_ptr
->checkpoint
= 1;
296 l_ptr
->peer_session
= INVALID_SESSION
;
297 l_ptr
->bearer_id
= b_ptr
->identity
;
298 link_set_supervision_props(l_ptr
, b_ptr
->tolerance
);
299 l_ptr
->state
= RESET_UNKNOWN
;
301 l_ptr
->pmsg
= (struct tipc_msg
*)&l_ptr
->proto_msg
;
303 tipc_msg_init(tn
->own_addr
, msg
, LINK_PROTOCOL
, RESET_MSG
, INT_H_SIZE
,
305 msg_set_size(msg
, sizeof(l_ptr
->proto_msg
));
306 msg_set_session(msg
, (tn
->random
& 0xffff));
307 msg_set_bearer_id(msg
, b_ptr
->identity
);
308 strcpy((char *)msg_data(msg
), if_name
);
309 l_ptr
->net_plane
= b_ptr
->net_plane
;
310 link_init_max_pkt(l_ptr
);
311 l_ptr
->priority
= b_ptr
->priority
;
312 tipc_link_set_queue_limits(l_ptr
, b_ptr
->window
);
313 l_ptr
->next_out_no
= 1;
314 __skb_queue_head_init(&l_ptr
->transmq
);
315 __skb_queue_head_init(&l_ptr
->backlogq
);
316 __skb_queue_head_init(&l_ptr
->deferdq
);
317 skb_queue_head_init(&l_ptr
->wakeupq
);
318 skb_queue_head_init(&l_ptr
->inputq
);
319 skb_queue_head_init(&l_ptr
->namedq
);
320 link_reset_statistics(l_ptr
);
321 tipc_node_attach_link(n_ptr
, l_ptr
);
322 setup_timer(&l_ptr
->timer
, link_timeout
, (unsigned long)l_ptr
);
323 link_state_event(l_ptr
, STARTING_EVT
);
329 * link_delete - Conditional deletion of link.
330 * If timer still running, real delete is done when it expires
331 * @link: link to be deleted
333 void tipc_link_delete(struct tipc_link
*link
)
335 tipc_link_reset_fragments(link
);
336 tipc_node_detach_link(link
->owner
, link
);
340 void tipc_link_delete_list(struct net
*net
, unsigned int bearer_id
,
343 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
344 struct tipc_link
*link
;
345 struct tipc_node
*node
;
349 list_for_each_entry_rcu(node
, &tn
->node_list
, list
) {
350 tipc_node_lock(node
);
351 link
= node
->links
[bearer_id
];
353 tipc_node_unlock(node
);
356 del_link
= !tipc_link_is_up(link
) && !link
->exp_msg_count
;
357 tipc_link_reset(link
);
358 if (del_timer(&link
->timer
))
360 link
->flags
|= LINK_STOPPED
;
361 /* Delete link now, or when failover is finished: */
362 if (shutting_down
|| !tipc_node_is_up(node
) || del_link
)
363 tipc_link_delete(link
);
364 tipc_node_unlock(node
);
370 * link_schedule_user - schedule a message sender for wakeup after congestion
371 * @link: congested link
372 * @list: message that was attempted sent
373 * Create pseudo msg to send back to user when congestion abates
374 * Only consumes message if there is an error
376 static int link_schedule_user(struct tipc_link
*link
, struct sk_buff_head
*list
)
378 struct tipc_msg
*msg
= buf_msg(skb_peek(list
));
379 int imp
= msg_importance(msg
);
380 u32 oport
= msg_origport(msg
);
381 u32 addr
= link_own_addr(link
);
384 /* This really cannot happen... */
385 if (unlikely(imp
> TIPC_CRITICAL_IMPORTANCE
)) {
386 pr_warn("%s<%s>, send queue full", link_rst_msg
, link
->name
);
387 tipc_link_reset(link
);
390 /* Non-blocking sender: */
391 if (TIPC_SKB_CB(skb_peek(list
))->wakeup_pending
)
394 /* Create and schedule wakeup pseudo message */
395 skb
= tipc_msg_create(SOCK_WAKEUP
, 0, INT_H_SIZE
, 0,
396 addr
, addr
, oport
, 0, 0);
399 TIPC_SKB_CB(skb
)->chain_sz
= skb_queue_len(list
);
400 TIPC_SKB_CB(skb
)->chain_imp
= imp
;
401 skb_queue_tail(&link
->wakeupq
, skb
);
402 link
->stats
.link_congs
++;
405 __skb_queue_purge(list
);
410 * link_prepare_wakeup - prepare users for wakeup after congestion
411 * @link: congested link
412 * Move a number of waiting users, as permitted by available space in
413 * the send queue, from link wait queue to node wait queue for wakeup
415 void link_prepare_wakeup(struct tipc_link
*l
)
417 int pnd
[TIPC_SYSTEM_IMPORTANCE
+ 1] = {0,};
419 struct sk_buff
*skb
, *tmp
;
421 skb_queue_walk_safe(&l
->wakeupq
, skb
, tmp
) {
422 imp
= TIPC_SKB_CB(skb
)->chain_imp
;
423 lim
= l
->window
+ l
->backlog
[imp
].limit
;
424 pnd
[imp
] += TIPC_SKB_CB(skb
)->chain_sz
;
425 if ((pnd
[imp
] + l
->backlog
[imp
].len
) >= lim
)
427 skb_unlink(skb
, &l
->wakeupq
);
428 skb_queue_tail(&l
->inputq
, skb
);
429 l
->owner
->inputq
= &l
->inputq
;
430 l
->owner
->action_flags
|= TIPC_MSG_EVT
;
435 * tipc_link_reset_fragments - purge link's inbound message fragments queue
436 * @l_ptr: pointer to link
438 void tipc_link_reset_fragments(struct tipc_link
*l_ptr
)
440 kfree_skb(l_ptr
->reasm_buf
);
441 l_ptr
->reasm_buf
= NULL
;
444 static void tipc_link_purge_backlog(struct tipc_link
*l
)
446 __skb_queue_purge(&l
->backlogq
);
447 l
->backlog
[TIPC_LOW_IMPORTANCE
].len
= 0;
448 l
->backlog
[TIPC_MEDIUM_IMPORTANCE
].len
= 0;
449 l
->backlog
[TIPC_HIGH_IMPORTANCE
].len
= 0;
450 l
->backlog
[TIPC_CRITICAL_IMPORTANCE
].len
= 0;
451 l
->backlog
[TIPC_SYSTEM_IMPORTANCE
].len
= 0;
455 * tipc_link_purge_queues - purge all pkt queues associated with link
456 * @l_ptr: pointer to link
458 void tipc_link_purge_queues(struct tipc_link
*l_ptr
)
460 __skb_queue_purge(&l_ptr
->deferdq
);
461 __skb_queue_purge(&l_ptr
->transmq
);
462 tipc_link_purge_backlog(l_ptr
);
463 tipc_link_reset_fragments(l_ptr
);
466 void tipc_link_reset(struct tipc_link
*l_ptr
)
468 u32 prev_state
= l_ptr
->state
;
469 u32 checkpoint
= l_ptr
->next_in_no
;
470 int was_active_link
= tipc_link_is_active(l_ptr
);
471 struct tipc_node
*owner
= l_ptr
->owner
;
473 msg_set_session(l_ptr
->pmsg
, ((msg_session(l_ptr
->pmsg
) + 1) & 0xffff));
475 /* Link is down, accept any session */
476 l_ptr
->peer_session
= INVALID_SESSION
;
478 /* Prepare for max packet size negotiation */
479 link_init_max_pkt(l_ptr
);
481 l_ptr
->state
= RESET_UNKNOWN
;
483 if ((prev_state
== RESET_UNKNOWN
) || (prev_state
== RESET_RESET
))
486 tipc_node_link_down(l_ptr
->owner
, l_ptr
);
487 tipc_bearer_remove_dest(owner
->net
, l_ptr
->bearer_id
, l_ptr
->addr
);
489 if (was_active_link
&& tipc_node_active_links(l_ptr
->owner
)) {
490 l_ptr
->reset_checkpoint
= checkpoint
;
491 l_ptr
->exp_msg_count
= START_CHANGEOVER
;
494 /* Clean up all queues, except inputq: */
495 __skb_queue_purge(&l_ptr
->transmq
);
496 __skb_queue_purge(&l_ptr
->deferdq
);
498 owner
->inputq
= &l_ptr
->inputq
;
499 skb_queue_splice_init(&l_ptr
->wakeupq
, owner
->inputq
);
500 if (!skb_queue_empty(owner
->inputq
))
501 owner
->action_flags
|= TIPC_MSG_EVT
;
502 tipc_link_purge_backlog(l_ptr
);
503 l_ptr
->rcv_unacked
= 0;
504 l_ptr
->checkpoint
= 1;
505 l_ptr
->next_out_no
= 1;
506 l_ptr
->fsm_msg_cnt
= 0;
507 l_ptr
->stale_count
= 0;
508 link_reset_statistics(l_ptr
);
511 void tipc_link_reset_list(struct net
*net
, unsigned int bearer_id
)
513 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
514 struct tipc_link
*l_ptr
;
515 struct tipc_node
*n_ptr
;
518 list_for_each_entry_rcu(n_ptr
, &tn
->node_list
, list
) {
519 tipc_node_lock(n_ptr
);
520 l_ptr
= n_ptr
->links
[bearer_id
];
522 tipc_link_reset(l_ptr
);
523 tipc_node_unlock(n_ptr
);
528 static void link_activate(struct tipc_link
*link
)
530 struct tipc_node
*node
= link
->owner
;
532 link
->next_in_no
= 1;
533 link
->stats
.recv_info
= 1;
534 tipc_node_link_up(node
, link
);
535 tipc_bearer_add_dest(node
->net
, link
->bearer_id
, link
->addr
);
539 * link_state_event - link finite state machine
540 * @l_ptr: pointer to link
541 * @event: state machine event to process
543 static void link_state_event(struct tipc_link
*l_ptr
, unsigned int event
)
545 struct tipc_link
*other
;
546 unsigned long cont_intv
= l_ptr
->cont_intv
;
548 if (l_ptr
->flags
& LINK_STOPPED
)
551 if (!(l_ptr
->flags
& LINK_STARTED
) && (event
!= STARTING_EVT
))
552 return; /* Not yet. */
554 /* Check whether changeover is going on */
555 if (l_ptr
->exp_msg_count
) {
556 if (event
== TIMEOUT_EVT
)
557 link_set_timer(l_ptr
, cont_intv
);
561 switch (l_ptr
->state
) {
562 case WORKING_WORKING
:
564 case TRAFFIC_MSG_EVT
:
568 if (l_ptr
->next_in_no
!= l_ptr
->checkpoint
) {
569 l_ptr
->checkpoint
= l_ptr
->next_in_no
;
570 if (tipc_bclink_acks_missing(l_ptr
->owner
)) {
571 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
573 l_ptr
->fsm_msg_cnt
++;
574 } else if (l_ptr
->max_pkt
< l_ptr
->max_pkt_target
) {
575 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
577 l_ptr
->fsm_msg_cnt
++;
579 link_set_timer(l_ptr
, cont_intv
);
582 l_ptr
->state
= WORKING_UNKNOWN
;
583 l_ptr
->fsm_msg_cnt
= 0;
584 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 1, 0, 0, 0, 0);
585 l_ptr
->fsm_msg_cnt
++;
586 link_set_timer(l_ptr
, cont_intv
/ 4);
589 pr_debug("%s<%s>, requested by peer\n",
590 link_rst_msg
, l_ptr
->name
);
591 tipc_link_reset(l_ptr
);
592 l_ptr
->state
= RESET_RESET
;
593 l_ptr
->fsm_msg_cnt
= 0;
594 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
596 l_ptr
->fsm_msg_cnt
++;
597 link_set_timer(l_ptr
, cont_intv
);
600 pr_debug("%s%u in WW state\n", link_unk_evt
, event
);
603 case WORKING_UNKNOWN
:
605 case TRAFFIC_MSG_EVT
:
607 l_ptr
->state
= WORKING_WORKING
;
608 l_ptr
->fsm_msg_cnt
= 0;
609 link_set_timer(l_ptr
, cont_intv
);
612 pr_debug("%s<%s>, requested by peer while probing\n",
613 link_rst_msg
, l_ptr
->name
);
614 tipc_link_reset(l_ptr
);
615 l_ptr
->state
= RESET_RESET
;
616 l_ptr
->fsm_msg_cnt
= 0;
617 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
619 l_ptr
->fsm_msg_cnt
++;
620 link_set_timer(l_ptr
, cont_intv
);
623 if (l_ptr
->next_in_no
!= l_ptr
->checkpoint
) {
624 l_ptr
->state
= WORKING_WORKING
;
625 l_ptr
->fsm_msg_cnt
= 0;
626 l_ptr
->checkpoint
= l_ptr
->next_in_no
;
627 if (tipc_bclink_acks_missing(l_ptr
->owner
)) {
628 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
630 l_ptr
->fsm_msg_cnt
++;
632 link_set_timer(l_ptr
, cont_intv
);
633 } else if (l_ptr
->fsm_msg_cnt
< l_ptr
->abort_limit
) {
634 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
636 l_ptr
->fsm_msg_cnt
++;
637 link_set_timer(l_ptr
, cont_intv
/ 4);
638 } else { /* Link has failed */
639 pr_debug("%s<%s>, peer not responding\n",
640 link_rst_msg
, l_ptr
->name
);
641 tipc_link_reset(l_ptr
);
642 l_ptr
->state
= RESET_UNKNOWN
;
643 l_ptr
->fsm_msg_cnt
= 0;
644 tipc_link_proto_xmit(l_ptr
, RESET_MSG
,
646 l_ptr
->fsm_msg_cnt
++;
647 link_set_timer(l_ptr
, cont_intv
);
651 pr_err("%s%u in WU state\n", link_unk_evt
, event
);
656 case TRAFFIC_MSG_EVT
:
659 other
= l_ptr
->owner
->active_links
[0];
660 if (other
&& link_working_unknown(other
))
662 l_ptr
->state
= WORKING_WORKING
;
663 l_ptr
->fsm_msg_cnt
= 0;
664 link_activate(l_ptr
);
665 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 1, 0, 0, 0, 0);
666 l_ptr
->fsm_msg_cnt
++;
667 if (l_ptr
->owner
->working_links
== 1)
668 tipc_link_sync_xmit(l_ptr
);
669 link_set_timer(l_ptr
, cont_intv
);
672 l_ptr
->state
= RESET_RESET
;
673 l_ptr
->fsm_msg_cnt
= 0;
674 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
676 l_ptr
->fsm_msg_cnt
++;
677 link_set_timer(l_ptr
, cont_intv
);
680 l_ptr
->flags
|= LINK_STARTED
;
681 l_ptr
->fsm_msg_cnt
++;
682 link_set_timer(l_ptr
, cont_intv
);
685 tipc_link_proto_xmit(l_ptr
, RESET_MSG
, 0, 0, 0, 0, 0);
686 l_ptr
->fsm_msg_cnt
++;
687 link_set_timer(l_ptr
, cont_intv
);
690 pr_err("%s%u in RU state\n", link_unk_evt
, event
);
695 case TRAFFIC_MSG_EVT
:
697 other
= l_ptr
->owner
->active_links
[0];
698 if (other
&& link_working_unknown(other
))
700 l_ptr
->state
= WORKING_WORKING
;
701 l_ptr
->fsm_msg_cnt
= 0;
702 link_activate(l_ptr
);
703 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 1, 0, 0, 0, 0);
704 l_ptr
->fsm_msg_cnt
++;
705 if (l_ptr
->owner
->working_links
== 1)
706 tipc_link_sync_xmit(l_ptr
);
707 link_set_timer(l_ptr
, cont_intv
);
712 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
714 l_ptr
->fsm_msg_cnt
++;
715 link_set_timer(l_ptr
, cont_intv
);
718 pr_err("%s%u in RR state\n", link_unk_evt
, event
);
722 pr_err("Unknown link state %u/%u\n", l_ptr
->state
, event
);
727 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
729 * @list: chain of buffers containing message
731 * Consumes the buffer chain, except when returning -ELINKCONG,
732 * since the caller then may want to make more send attempts.
733 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
734 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
736 int __tipc_link_xmit(struct net
*net
, struct tipc_link
*link
,
737 struct sk_buff_head
*list
)
739 struct tipc_msg
*msg
= buf_msg(skb_peek(list
));
740 unsigned int maxwin
= link
->window
;
741 unsigned int imp
= msg_importance(msg
);
742 uint mtu
= link
->max_pkt
;
743 uint ack
= mod(link
->next_in_no
- 1);
744 uint seqno
= link
->next_out_no
;
745 uint bc_last_in
= link
->owner
->bclink
.last_in
;
746 struct tipc_media_addr
*addr
= &link
->media_addr
;
747 struct sk_buff_head
*transmq
= &link
->transmq
;
748 struct sk_buff_head
*backlogq
= &link
->backlogq
;
749 struct sk_buff
*skb
, *tmp
;
751 /* Match backlog limit against msg importance: */
752 if (unlikely(link
->backlog
[imp
].len
>= link
->backlog
[imp
].limit
))
753 return link_schedule_user(link
, list
);
755 if (unlikely(msg_size(msg
) > mtu
)) {
756 __skb_queue_purge(list
);
759 /* Prepare each packet for sending, and add to relevant queue: */
760 skb_queue_walk_safe(list
, skb
, tmp
) {
761 __skb_unlink(skb
, list
);
763 msg_set_seqno(msg
, seqno
);
764 msg_set_ack(msg
, ack
);
765 msg_set_bcast_ack(msg
, bc_last_in
);
767 if (likely(skb_queue_len(transmq
) < maxwin
)) {
768 __skb_queue_tail(transmq
, skb
);
769 tipc_bearer_send(net
, link
->bearer_id
, skb
, addr
);
770 link
->rcv_unacked
= 0;
774 if (tipc_msg_bundle(skb_peek_tail(backlogq
), skb
, mtu
)) {
775 link
->stats
.sent_bundled
++;
778 if (tipc_msg_make_bundle(&skb
, mtu
, link
->addr
)) {
779 link
->stats
.sent_bundled
++;
780 link
->stats
.sent_bundles
++;
781 imp
= msg_importance(buf_msg(skb
));
783 __skb_queue_tail(backlogq
, skb
);
784 link
->backlog
[imp
].len
++;
787 link
->next_out_no
= seqno
;
/* Initialize a queue head containing the single buffer 'skb' */
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}
797 static int __tipc_link_xmit_skb(struct tipc_link
*link
, struct sk_buff
*skb
)
799 struct sk_buff_head head
;
801 skb2list(skb
, &head
);
802 return __tipc_link_xmit(link
->owner
->net
, link
, &head
);
805 /* tipc_link_xmit_skb(): send single buffer to destination
806 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
807 * messages, which will not be rejected
808 * The only exception is datagram messages rerouted after secondary
809 * lookup, which are rare and safe to dispose of anyway.
810 * TODO: Return real return value, and let callers use
811 * tipc_wait_for_sendpkt() where applicable
813 int tipc_link_xmit_skb(struct net
*net
, struct sk_buff
*skb
, u32 dnode
,
816 struct sk_buff_head head
;
819 skb2list(skb
, &head
);
820 rc
= tipc_link_xmit(net
, &head
, dnode
, selector
);
821 if (rc
== -ELINKCONG
)
827 * tipc_link_xmit() is the general link level function for message sending
828 * @net: the applicable net namespace
829 * @list: chain of buffers containing message
830 * @dsz: amount of user data to be sent
831 * @dnode: address of destination node
832 * @selector: a number used for deterministic link selection
833 * Consumes the buffer chain, except when returning -ELINKCONG
834 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
836 int tipc_link_xmit(struct net
*net
, struct sk_buff_head
*list
, u32 dnode
,
839 struct tipc_link
*link
= NULL
;
840 struct tipc_node
*node
;
841 int rc
= -EHOSTUNREACH
;
843 node
= tipc_node_find(net
, dnode
);
845 tipc_node_lock(node
);
846 link
= node
->active_links
[selector
& 1];
848 rc
= __tipc_link_xmit(net
, link
, list
);
849 tipc_node_unlock(node
);
854 if (likely(in_own_node(net
, dnode
))) {
855 tipc_sk_rcv(net
, list
);
859 __skb_queue_purge(list
);
864 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
866 * Give a newly added peer node the sequence number where it should
867 * start receiving and acking broadcast packets.
869 * Called with node locked
871 static void tipc_link_sync_xmit(struct tipc_link
*link
)
874 struct tipc_msg
*msg
;
876 skb
= tipc_buf_acquire(INT_H_SIZE
);
881 tipc_msg_init(link_own_addr(link
), msg
, BCAST_PROTOCOL
, STATE_MSG
,
882 INT_H_SIZE
, link
->addr
);
883 msg_set_last_bcast(msg
, link
->owner
->bclink
.acked
);
884 __tipc_link_xmit_skb(link
, skb
);
888 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
889 * Receive the sequence number where we should start receiving and
890 * acking broadcast packets from a newly added peer node, and open
891 * up for reception of such packets.
893 * Called with node locked
895 static void tipc_link_sync_rcv(struct tipc_node
*n
, struct sk_buff
*buf
)
897 struct tipc_msg
*msg
= buf_msg(buf
);
899 n
->bclink
.last_sent
= n
->bclink
.last_in
= msg_last_bcast(msg
);
900 n
->bclink
.recv_permitted
= true;
905 * tipc_link_push_packets - push unsent packets to bearer
907 * Push out the unsent messages of a link where congestion
908 * has abated. Node is locked.
910 * Called with node locked
912 void tipc_link_push_packets(struct tipc_link
*link
)
915 struct tipc_msg
*msg
;
916 unsigned int ack
= mod(link
->next_in_no
- 1);
918 while (skb_queue_len(&link
->transmq
) < link
->window
) {
919 skb
= __skb_dequeue(&link
->backlogq
);
923 link
->backlog
[msg_importance(msg
)].len
--;
924 msg_set_ack(msg
, ack
);
925 msg_set_bcast_ack(msg
, link
->owner
->bclink
.last_in
);
926 link
->rcv_unacked
= 0;
927 __skb_queue_tail(&link
->transmq
, skb
);
928 tipc_bearer_send(link
->owner
->net
, link
->bearer_id
,
929 skb
, &link
->media_addr
);
933 void tipc_link_reset_all(struct tipc_node
*node
)
935 char addr_string
[16];
938 tipc_node_lock(node
);
940 pr_warn("Resetting all links to %s\n",
941 tipc_addr_string_fill(addr_string
, node
->addr
));
943 for (i
= 0; i
< MAX_BEARERS
; i
++) {
944 if (node
->links
[i
]) {
945 link_print(node
->links
[i
], "Resetting link\n");
946 tipc_link_reset(node
->links
[i
]);
950 tipc_node_unlock(node
);
953 static void link_retransmit_failure(struct tipc_link
*l_ptr
,
956 struct tipc_msg
*msg
= buf_msg(buf
);
957 struct net
*net
= l_ptr
->owner
->net
;
959 pr_warn("Retransmission failure on link <%s>\n", l_ptr
->name
);
962 /* Handle failure on standard link */
963 link_print(l_ptr
, "Resetting link\n");
964 tipc_link_reset(l_ptr
);
967 /* Handle failure on broadcast link */
968 struct tipc_node
*n_ptr
;
969 char addr_string
[16];
971 pr_info("Msg seq number: %u, ", msg_seqno(msg
));
972 pr_cont("Outstanding acks: %lu\n",
973 (unsigned long) TIPC_SKB_CB(buf
)->handle
);
975 n_ptr
= tipc_bclink_retransmit_to(net
);
976 tipc_node_lock(n_ptr
);
978 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
979 pr_info("Broadcast link info for %s\n", addr_string
);
980 pr_info("Reception permitted: %d, Acked: %u\n",
981 n_ptr
->bclink
.recv_permitted
,
982 n_ptr
->bclink
.acked
);
983 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
984 n_ptr
->bclink
.last_in
,
985 n_ptr
->bclink
.oos_state
,
986 n_ptr
->bclink
.last_sent
);
988 tipc_node_unlock(n_ptr
);
990 tipc_bclink_set_flags(net
, TIPC_BCLINK_RESET
);
991 l_ptr
->stale_count
= 0;
995 void tipc_link_retransmit(struct tipc_link
*l_ptr
, struct sk_buff
*skb
,
998 struct tipc_msg
*msg
;
1005 /* Detect repeated retransmit failures */
1006 if (l_ptr
->last_retransmitted
== msg_seqno(msg
)) {
1007 if (++l_ptr
->stale_count
> 100) {
1008 link_retransmit_failure(l_ptr
, skb
);
1012 l_ptr
->last_retransmitted
= msg_seqno(msg
);
1013 l_ptr
->stale_count
= 1;
1016 skb_queue_walk_from(&l_ptr
->transmq
, skb
) {
1020 msg_set_ack(msg
, mod(l_ptr
->next_in_no
- 1));
1021 msg_set_bcast_ack(msg
, l_ptr
->owner
->bclink
.last_in
);
1022 tipc_bearer_send(l_ptr
->owner
->net
, l_ptr
->bearer_id
, skb
,
1023 &l_ptr
->media_addr
);
1025 l_ptr
->stats
.retransmitted
++;
1029 static void link_retrieve_defq(struct tipc_link
*link
,
1030 struct sk_buff_head
*list
)
1034 if (skb_queue_empty(&link
->deferdq
))
1037 seq_no
= buf_seqno(skb_peek(&link
->deferdq
));
1038 if (seq_no
== mod(link
->next_in_no
))
1039 skb_queue_splice_tail_init(&link
->deferdq
, list
);
1043 * tipc_rcv - process TIPC packets/messages arriving from off-node
1044 * @net: the applicable net namespace
1046 * @b_ptr: pointer to bearer message arrived on
1048 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1049 * structure (i.e. cannot be NULL), but bearer can be inactive.
1051 void tipc_rcv(struct net
*net
, struct sk_buff
*skb
, struct tipc_bearer
*b_ptr
)
1053 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1054 struct sk_buff_head head
;
1055 struct tipc_node
*n_ptr
;
1056 struct tipc_link
*l_ptr
;
1057 struct sk_buff
*skb1
, *tmp
;
1058 struct tipc_msg
*msg
;
1063 skb2list(skb
, &head
);
1065 while ((skb
= __skb_dequeue(&head
))) {
1066 /* Ensure message is well-formed */
1067 if (unlikely(!tipc_msg_validate(skb
)))
1070 /* Handle arrival of a non-unicast link message */
1072 if (unlikely(msg_non_seq(msg
))) {
1073 if (msg_user(msg
) == LINK_CONFIG
)
1074 tipc_disc_rcv(net
, skb
, b_ptr
);
1076 tipc_bclink_rcv(net
, skb
);
1080 /* Discard unicast link messages destined for another node */
1081 if (unlikely(!msg_short(msg
) &&
1082 (msg_destnode(msg
) != tn
->own_addr
)))
1085 /* Locate neighboring node that sent message */
1086 n_ptr
= tipc_node_find(net
, msg_prevnode(msg
));
1087 if (unlikely(!n_ptr
))
1089 tipc_node_lock(n_ptr
);
1091 /* Locate unicast link endpoint that should handle message */
1092 l_ptr
= n_ptr
->links
[b_ptr
->identity
];
1093 if (unlikely(!l_ptr
))
1096 /* Verify that communication with node is currently allowed */
1097 if ((n_ptr
->action_flags
& TIPC_WAIT_PEER_LINKS_DOWN
) &&
1098 msg_user(msg
) == LINK_PROTOCOL
&&
1099 (msg_type(msg
) == RESET_MSG
||
1100 msg_type(msg
) == ACTIVATE_MSG
) &&
1101 !msg_redundant_link(msg
))
1102 n_ptr
->action_flags
&= ~TIPC_WAIT_PEER_LINKS_DOWN
;
1104 if (tipc_node_blocked(n_ptr
))
1107 /* Validate message sequence number info */
1108 seq_no
= msg_seqno(msg
);
1109 ackd
= msg_ack(msg
);
1111 /* Release acked messages */
1112 if (unlikely(n_ptr
->bclink
.acked
!= msg_bcast_ack(msg
)))
1113 tipc_bclink_acknowledge(n_ptr
, msg_bcast_ack(msg
));
1116 skb_queue_walk_safe(&l_ptr
->transmq
, skb1
, tmp
) {
1117 if (more(buf_seqno(skb1
), ackd
))
1119 __skb_unlink(skb1
, &l_ptr
->transmq
);
1124 /* Try sending any messages link endpoint has pending */
1125 if (unlikely(skb_queue_len(&l_ptr
->backlogq
)))
1126 tipc_link_push_packets(l_ptr
);
1128 if (released
&& !skb_queue_empty(&l_ptr
->wakeupq
))
1129 link_prepare_wakeup(l_ptr
);
1131 /* Process the incoming packet */
1132 if (unlikely(!link_working_working(l_ptr
))) {
1133 if (msg_user(msg
) == LINK_PROTOCOL
) {
1134 tipc_link_proto_rcv(l_ptr
, skb
);
1135 link_retrieve_defq(l_ptr
, &head
);
1140 /* Traffic message. Conditionally activate link */
1141 link_state_event(l_ptr
, TRAFFIC_MSG_EVT
);
1143 if (link_working_working(l_ptr
)) {
1144 /* Re-insert buffer in front of queue */
1145 __skb_queue_head(&head
, skb
);
1152 /* Link is now in state WORKING_WORKING */
1153 if (unlikely(seq_no
!= mod(l_ptr
->next_in_no
))) {
1154 link_handle_out_of_seq_msg(l_ptr
, skb
);
1155 link_retrieve_defq(l_ptr
, &head
);
1159 l_ptr
->next_in_no
++;
1160 if (unlikely(!skb_queue_empty(&l_ptr
->deferdq
)))
1161 link_retrieve_defq(l_ptr
, &head
);
1162 if (unlikely(++l_ptr
->rcv_unacked
>= TIPC_MIN_LINK_WIN
)) {
1163 l_ptr
->stats
.sent_acks
++;
1164 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 0, 0, 0, 0, 0);
1166 tipc_link_input(l_ptr
, skb
);
1169 tipc_node_unlock(n_ptr
);
/*
 * tipc_data_input(): routes a received buffer to the correct upper-layer
 * queue based on msg_user(): user-importance data goes to link->inputq,
 * NAME_DISTRIBUTOR traffic to link->namedq; CHANGEOVER/FRAGMENTER/BCAST
 * users and unknown users are not consumed here.
 * NOTE(review): this text is a mangled extraction -- logical lines are split
 * across physical lines and several original lines (returns/breaks/braces)
 * are missing; code left byte-identical, comments only added.
 */
1176 /* tipc_data_input - deliver data and name distr msgs to upper layer
1178 * Consumes buffer if message is of right type
1179 * Node lock must be held
1181 static bool tipc_data_input(struct tipc_link
*link
, struct sk_buff
*skb
)
1183 struct tipc_node
*node
= link
->owner
;
1184 struct tipc_msg
*msg
= buf_msg(skb
);
1185 u32 dport
= msg_destport(msg
);
1187 switch (msg_user(msg
)) {
1188 case TIPC_LOW_IMPORTANCE
:
1189 case TIPC_MEDIUM_IMPORTANCE
:
1190 case TIPC_HIGH_IMPORTANCE
:
1191 case TIPC_CRITICAL_IMPORTANCE
:
/* Data message: queue on link input queue and flag the node so the
 * message event is processed (TIPC_MSG_EVT). */
1193 if (tipc_skb_queue_tail(&link
->inputq
, skb
, dport
)) {
1194 node
->inputq
= &link
->inputq
;
1195 node
->action_flags
|= TIPC_MSG_EVT
;
1198 case NAME_DISTRIBUTOR
:
/* Name-table distribution: mark broadcast reception permitted and
 * signal TIPC_NAMED_MSG_EVT only when the queue transitions 0 -> 1. */
1199 node
->bclink
.recv_permitted
= true;
1200 node
->namedq
= &link
->namedq
;
1201 skb_queue_tail(&link
->namedq
, skb
);
1202 if (skb_queue_len(&link
->namedq
) == 1)
1203 node
->action_flags
|= TIPC_NAMED_MSG_EVT
;
1206 case CHANGEOVER_PROTOCOL
:
1207 case MSG_FRAGMENTER
:
1208 case BCAST_PROTOCOL
:
/* These users are handled by tipc_link_input(); presumably this path
 * returns false so the caller keeps the buffer -- TODO confirm against
 * the unmangled source. */
1211 pr_warn("Dropping received illegal msg type\n");
/*
 * tipc_link_input(): second-stage delivery for packets that
 * tipc_data_input() did not consume -- unwraps changeover tunnels,
 * unbundles MSG_BUNDLER packets, reassembles fragments and handles
 * broadcast-sync messages, feeding each inner buffer back through
 * tipc_data_input().
 * NOTE(review): mangled extraction -- lines split and some original lines
 * (breaks/returns/braces) missing; code left byte-identical.
 */
1217 /* tipc_link_input - process packet that has passed link protocol check
1220 * Node lock must be held
1222 static void tipc_link_input(struct tipc_link
*link
, struct sk_buff
*skb
)
1224 struct tipc_node
*node
= link
->owner
;
1225 struct tipc_msg
*msg
= buf_msg(skb
);
1226 struct sk_buff
*iskb
;
/* Fast path: ordinary data/name-distr messages are consumed here. */
1229 if (likely(tipc_data_input(link
, skb
)))
1232 switch (msg_user(msg
)) {
1233 case CHANGEOVER_PROTOCOL
:
1234 if (!tipc_link_tunnel_rcv(node
, &skb
))
1236 if (msg_user(buf_msg(skb
)) != MSG_BUNDLER
) {
1237 tipc_data_input(link
, skb
);
/* Bundle: account it, then extract and deliver each inner message. */
1241 link
->stats
.recv_bundles
++;
1242 link
->stats
.recv_bundled
+= msg_msgcnt(msg
);
1244 while (tipc_msg_extract(skb
, &iskb
, &pos
))
1245 tipc_data_input(link
, iskb
);
1247 case MSG_FRAGMENTER
:
1248 link
->stats
.recv_fragments
++;
1249 if (tipc_buf_append(&link
->reasm_buf
, &skb
)) {
1250 link
->stats
.recv_fragmented
++;
1251 tipc_data_input(link
, skb
);
1252 } else if (!link
->reasm_buf
) {
/* Reassembly failed and no partial buffer remains: reset the link. */
1253 tipc_link_reset(link
);
1256 case BCAST_PROTOCOL
:
1257 tipc_link_sync_rcv(node
, skb
);
/*
 * tipc_link_defer_pkt(): insert an out-of-sequence buffer into @list in
 * ascending sequence-number order, dropping exact duplicates. Handles the
 * common cases (empty list, append at tail) before walking for the
 * insertion point.
 * NOTE(review): mangled extraction -- return statements and braces are
 * missing from this view; code left byte-identical, comments only.
 */
1265 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1267 * Returns increase in queue length (i.e. 0 or 1)
1269 u32
tipc_link_defer_pkt(struct sk_buff_head
*list
, struct sk_buff
*skb
)
1271 struct sk_buff
*skb1
;
1272 u32 seq_no
= buf_seqno(skb
);
/* Empty queue: trivially append. */
1275 if (skb_queue_empty(list
)) {
1276 __skb_queue_tail(list
, skb
);
/* Sequence number larger than current tail: append (common case). */
1281 if (less(buf_seqno(skb_peek_tail(list
)), seq_no
)) {
1282 __skb_queue_tail(list
, skb
);
1286 /* Locate insertion point in queue, then insert; discard if duplicate */
1287 skb_queue_walk(list
, skb1
) {
1288 u32 curr_seqno
= buf_seqno(skb1
);
1290 if (seq_no
== curr_seqno
) {
1295 if (less(seq_no
, curr_seqno
))
1299 __skb_queue_before(list
, skb1
, skb
);
/*
 * link_handle_out_of_seq_msg(): handle a packet whose sequence number does
 * not match next_in_no. Protocol messages are diverted to
 * tipc_link_proto_rcv(); stale packets are counted as duplicates; anything
 * else is deferred, and every TIPC_MIN_LINK_WIN-th deferral triggers a
 * STATE_MSG so the peer learns about the gap.
 * NOTE(review): mangled extraction -- some lines missing; byte-identical.
 */
1304 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1306 static void link_handle_out_of_seq_msg(struct tipc_link
*l_ptr
,
1307 struct sk_buff
*buf
)
1309 u32 seq_no
= buf_seqno(buf
);
1311 if (likely(msg_user(buf_msg(buf
)) == LINK_PROTOCOL
)) {
1312 tipc_link_proto_rcv(l_ptr
, buf
);
1316 /* Record OOS packet arrival (force mismatch on next timeout) */
1317 l_ptr
->checkpoint
--;
1320 * Discard packet if a duplicate; otherwise add it to deferred queue
1321 * and notify peer of gap as per protocol specification
1323 if (less(seq_no
, mod(l_ptr
->next_in_no
))) {
1324 l_ptr
->stats
.duplicates
++;
1329 if (tipc_link_defer_pkt(&l_ptr
->deferdq
, buf
)) {
1330 l_ptr
->stats
.deferred_recv
++;
/* Rate-limit gap notifications to one per TIPC_MIN_LINK_WIN deferrals. */
1331 if ((skb_queue_len(&l_ptr
->deferdq
) % TIPC_MIN_LINK_WIN
) == 1)
1332 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 0, 0, 0, 0, 0);
1334 l_ptr
->stats
.duplicates
++;
/*
 * tipc_link_proto_xmit(): build and transmit one link protocol message
 * (STATE_MSG, RESET_MSG or ACTIVATE_MSG) directly on the bearer, bypassing
 * the normal transmit queues. STATE messages carry ack/gap/probe info and
 * drive MTU discovery; RESET/ACTIVATE carry the link's own settings.
 * Suppressed during changeover and (for non-RESET) while the peer node is
 * blocked.
 * NOTE(review): mangled extraction -- early-return bodies and several lines
 * are missing from this view; code left byte-identical, comments only.
 */
1339 * Send protocol message to the other endpoint.
1341 void tipc_link_proto_xmit(struct tipc_link
*l_ptr
, u32 msg_typ
, int probe_msg
,
1342 u32 gap
, u32 tolerance
, u32 priority
, u32 ack_mtu
)
1344 struct sk_buff
*buf
= NULL
;
1345 struct tipc_msg
*msg
= l_ptr
->pmsg
;
1346 u32 msg_size
= sizeof(l_ptr
->proto_msg
);
1349 /* Don't send protocol message during link changeover */
1350 if (l_ptr
->exp_msg_count
)
1353 /* Abort non-RESET send if communication with node is prohibited */
1354 if ((tipc_node_blocked(l_ptr
->owner
)) && (msg_typ
!= RESET_MSG
))
1357 /* Create protocol message with "out-of-sequence" sequence number */
1358 msg_set_type(msg
, msg_typ
);
1359 msg_set_net_plane(msg
, l_ptr
->net_plane
);
1360 msg_set_bcast_ack(msg
, l_ptr
->owner
->bclink
.last_in
);
1361 msg_set_last_bcast(msg
, tipc_bclink_get_last_sent(l_ptr
->owner
->net
));
1363 if (msg_typ
== STATE_MSG
) {
1364 u32 next_sent
= mod(l_ptr
->next_out_no
);
1366 if (!tipc_link_is_up(l_ptr
))
1368 if (skb_queue_len(&l_ptr
->backlogq
))
1369 next_sent
= buf_seqno(skb_peek(&l_ptr
->backlogq
));
1370 msg_set_next_sent(msg
, next_sent
);
/* A non-empty deferred queue implies a reception gap: report it. */
1371 if (!skb_queue_empty(&l_ptr
->deferdq
)) {
1372 u32 rec
= buf_seqno(skb_peek(&l_ptr
->deferdq
));
1373 gap
= mod(rec
- mod(l_ptr
->next_in_no
));
1375 msg_set_seq_gap(msg
, gap
);
1377 l_ptr
->stats
.sent_nacks
++;
1378 msg_set_link_tolerance(msg
, tolerance
);
1379 msg_set_linkprio(msg
, priority
);
1380 msg_set_max_pkt(msg
, ack_mtu
);
1381 msg_set_ack(msg
, mod(l_ptr
->next_in_no
- 1));
1382 msg_set_probe(msg
, probe_msg
!= 0);
1384 u32 mtu
= l_ptr
->max_pkt
;
/* MTU discovery: probe with a size halfway between current MTU and the
 * target, word-aligned; after 10 unanswered probes lower the target. */
1386 if ((mtu
< l_ptr
->max_pkt_target
) &&
1387 link_working_working(l_ptr
) &&
1388 l_ptr
->fsm_msg_cnt
) {
1389 msg_size
= (mtu
+ (l_ptr
->max_pkt_target
- mtu
)/2 + 2) & ~3;
1390 if (l_ptr
->max_pkt_probes
== 10) {
1391 l_ptr
->max_pkt_target
= (msg_size
- 4);
1392 l_ptr
->max_pkt_probes
= 0;
1393 msg_size
= (mtu
+ (l_ptr
->max_pkt_target
- mtu
)/2 + 2) & ~3;
1395 l_ptr
->max_pkt_probes
++;
1398 l_ptr
->stats
.sent_probes
++;
1400 l_ptr
->stats
.sent_states
++;
1401 } else { /* RESET_MSG or ACTIVATE_MSG */
1402 msg_set_ack(msg
, mod(l_ptr
->reset_checkpoint
- 1));
1403 msg_set_seq_gap(msg
, 0);
1404 msg_set_next_sent(msg
, 1);
1405 msg_set_probe(msg
, 0);
1406 msg_set_link_tolerance(msg
, l_ptr
->tolerance
);
1407 msg_set_linkprio(msg
, l_ptr
->priority
);
1408 msg_set_max_pkt(msg
, l_ptr
->max_pkt_target
);
/* Redundant-link flag: set when another working link to the peer exists. */
1411 r_flag
= (l_ptr
->owner
->working_links
> tipc_link_is_up(l_ptr
));
1412 msg_set_redundant_link(msg
, r_flag
);
1413 msg_set_linkprio(msg
, l_ptr
->priority
);
1414 msg_set_size(msg
, msg_size
);
1416 msg_set_seqno(msg
, mod(l_ptr
->next_out_no
+ (0xffff/2)));
1418 buf
= tipc_buf_acquire(msg_size
);
1422 skb_copy_to_linear_data(buf
, msg
, sizeof(l_ptr
->proto_msg
));
1423 buf
->priority
= TC_PRIO_CONTROL
;
1424 tipc_bearer_send(l_ptr
->owner
->net
, l_ptr
->bearer_id
, buf
,
1425 &l_ptr
->media_addr
);
1426 l_ptr
->rcv_unacked
= 0;
/*
 * tipc_link_proto_rcv(): process an incoming link protocol message.
 * RESET/ACTIVATE update the peer session, link name suffix, tolerance,
 * priority and MTU target, and may drive the link FSM; STATE updates
 * ack/gap bookkeeping, MTU discovery and may trigger a reply STATE_MSG
 * or retransmission. Net-plane id is adopted from the lower-addressed node.
 * NOTE(review): mangled extraction -- case labels, local declarations and
 * several lines are missing from this view; code left byte-identical.
 */
1431 * Receive protocol message :
1432 * Note that network plane id propagates through the network, and may
1433 * change at any time. The node with lowest address rules
1435 static void tipc_link_proto_rcv(struct tipc_link
*l_ptr
,
1436 struct sk_buff
*buf
)
1442 struct tipc_msg
*msg
= buf_msg(buf
);
1444 /* Discard protocol message during link changeover */
1445 if (l_ptr
->exp_msg_count
)
1448 if (l_ptr
->net_plane
!= msg_net_plane(msg
))
1449 if (link_own_addr(l_ptr
) > msg_prevnode(msg
))
1450 l_ptr
->net_plane
= msg_net_plane(msg
);
1452 switch (msg_type(msg
)) {
1455 if (!link_working_unknown(l_ptr
) &&
1456 (l_ptr
->peer_session
!= INVALID_SESSION
)) {
1457 if (less_eq(msg_session(msg
), l_ptr
->peer_session
))
1458 break; /* duplicate or old reset: ignore */
1461 if (!msg_redundant_link(msg
) && (link_working_working(l_ptr
) ||
1462 link_working_unknown(l_ptr
))) {
1464 * peer has lost contact -- don't allow peer's links
1465 * to reactivate before we recognize loss & clean up
1467 l_ptr
->owner
->action_flags
|= TIPC_WAIT_OWN_LINKS_DOWN
;
1470 link_state_event(l_ptr
, RESET_MSG
);
1474 /* Update link settings according other endpoint's values */
1475 strcpy((strrchr(l_ptr
->name
, ':') + 1), (char *)msg_data(msg
));
1477 msg_tol
= msg_link_tolerance(msg
);
1478 if (msg_tol
> l_ptr
->tolerance
)
1479 link_set_supervision_props(l_ptr
, msg_tol
);
1481 if (msg_linkprio(msg
) > l_ptr
->priority
)
1482 l_ptr
->priority
= msg_linkprio(msg
);
1484 max_pkt_info
= msg_max_pkt(msg
);
1486 if (max_pkt_info
< l_ptr
->max_pkt_target
)
1487 l_ptr
->max_pkt_target
= max_pkt_info
;
1488 if (l_ptr
->max_pkt
> l_ptr
->max_pkt_target
)
1489 l_ptr
->max_pkt
= l_ptr
->max_pkt_target
;
1491 l_ptr
->max_pkt
= l_ptr
->max_pkt_target
;
1494 /* Synchronize broadcast link info, if not done previously */
1495 if (!tipc_node_is_up(l_ptr
->owner
)) {
1496 l_ptr
->owner
->bclink
.last_sent
=
1497 l_ptr
->owner
->bclink
.last_in
=
1498 msg_last_bcast(msg
);
1499 l_ptr
->owner
->bclink
.oos_state
= 0;
1502 l_ptr
->peer_session
= msg_session(msg
);
1503 l_ptr
->peer_bearer_id
= msg_bearer_id(msg
);
1505 if (msg_type(msg
) == ACTIVATE_MSG
)
1506 link_state_event(l_ptr
, ACTIVATE_MSG
);
/* STATE message handling follows (case label lost in extraction). */
1510 msg_tol
= msg_link_tolerance(msg
);
1512 link_set_supervision_props(l_ptr
, msg_tol
);
/* Peer priority change forces a link reset to renegotiate. */
1514 if (msg_linkprio(msg
) &&
1515 (msg_linkprio(msg
) != l_ptr
->priority
)) {
1516 pr_debug("%s<%s>, priority change %u->%u\n",
1517 link_rst_msg
, l_ptr
->name
,
1518 l_ptr
->priority
, msg_linkprio(msg
));
1519 l_ptr
->priority
= msg_linkprio(msg
);
1520 tipc_link_reset(l_ptr
); /* Enforce change to take effect */
1524 /* Record reception; force mismatch at next timeout: */
1525 l_ptr
->checkpoint
--;
1527 link_state_event(l_ptr
, TRAFFIC_MSG_EVT
);
1528 l_ptr
->stats
.recv_states
++;
1529 if (link_reset_unknown(l_ptr
))
/* Compute how many packets the peer claims to have sent that we have
 * not yet received. */
1532 if (less_eq(mod(l_ptr
->next_in_no
), msg_next_sent(msg
))) {
1533 rec_gap
= mod(msg_next_sent(msg
) -
1534 mod(l_ptr
->next_in_no
));
1537 max_pkt_ack
= msg_max_pkt(msg
);
1538 if (max_pkt_ack
> l_ptr
->max_pkt
) {
1539 l_ptr
->max_pkt
= max_pkt_ack
;
1540 l_ptr
->max_pkt_probes
= 0;
1544 if (msg_probe(msg
)) {
1545 l_ptr
->stats
.recv_probes
++;
1546 if (msg_size(msg
) > sizeof(l_ptr
->proto_msg
))
1547 max_pkt_ack
= msg_size(msg
);
1550 /* Protocol message before retransmits, reduce loss risk */
1551 if (l_ptr
->owner
->bclink
.recv_permitted
)
1552 tipc_bclink_update_link_state(l_ptr
->owner
,
1553 msg_last_bcast(msg
));
1555 if (rec_gap
|| (msg_probe(msg
))) {
1556 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 0, rec_gap
, 0,
1559 if (msg_seq_gap(msg
)) {
1560 l_ptr
->stats
.recv_nacks
++;
1561 tipc_link_retransmit(l_ptr
, skb_peek(&l_ptr
->transmq
),
/*
 * tipc_link_tunnel_xmit(): wrap @msg inside @tunnel_hdr and send it over
 * the peer node's active link selected by @selector, allocating a new skb
 * of INT_H_SIZE + payload. Warns and bails if the tunnel link is down or
 * allocation fails.
 * NOTE(review): mangled extraction -- return statements and braces missing
 * from this view; code left byte-identical, comments only.
 */
1571 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1572 * a different bearer. Owner node is locked.
1574 static void tipc_link_tunnel_xmit(struct tipc_link
*l_ptr
,
1575 struct tipc_msg
*tunnel_hdr
,
1576 struct tipc_msg
*msg
,
1579 struct tipc_link
*tunnel
;
1580 struct sk_buff
*skb
;
1581 u32 length
= msg_size(msg
);
1583 tunnel
= l_ptr
->owner
->active_links
[selector
& 1];
1584 if (!tipc_link_is_up(tunnel
)) {
1585 pr_warn("%stunnel link no longer available\n", link_co_err
);
1588 msg_set_size(tunnel_hdr
, length
+ INT_H_SIZE
);
1589 skb
= tipc_buf_acquire(length
+ INT_H_SIZE
);
1591 pr_warn("%sunable to send tunnel msg\n", link_co_err
);
/* Tunnel header first, then the original message as payload. */
1594 skb_copy_to_linear_data(skb
, tunnel_hdr
, INT_H_SIZE
);
1595 skb_copy_to_linear_data_offset(skb
, INT_H_SIZE
, msg
, length
);
1596 __tipc_link_xmit_skb(tunnel
, skb
);
/*
 * tipc_link_failover_send_queue(): on link failure, tunnel the failed
 * link's whole transmit queue (backlog first spliced into transmq) through
 * the remaining active link as ORIGINAL_MSG changeover packets. An empty
 * queue still sends one header-only changeover message so the peer learns
 * the count. Bundles are split into their inner messages when the two
 * active links differ.
 * NOTE(review): mangled extraction -- some lines missing; byte-identical.
 */
1600 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1601 * link is still active. We can do failover. Tunnel the failing link's
1602 * whole send queue via the remaining link. This way, we don't lose
1603 * any packets, and sequence order is preserved for subsequent traffic
1604 * sent over the remaining link. Owner node is locked.
1606 void tipc_link_failover_send_queue(struct tipc_link
*l_ptr
)
1609 struct tipc_link
*tunnel
= l_ptr
->owner
->active_links
[0];
1610 struct tipc_msg tunnel_hdr
;
1611 struct sk_buff
*skb
;
1617 tipc_msg_init(link_own_addr(l_ptr
), &tunnel_hdr
, CHANGEOVER_PROTOCOL
,
1618 ORIGINAL_MSG
, INT_H_SIZE
, l_ptr
->addr
);
/* Fold the backlog into the transmit queue so everything is tunneled. */
1619 skb_queue_splice_tail_init(&l_ptr
->backlogq
, &l_ptr
->transmq
);
1620 tipc_link_purge_backlog(l_ptr
);
1621 msgcount
= skb_queue_len(&l_ptr
->transmq
);
1622 msg_set_bearer_id(&tunnel_hdr
, l_ptr
->peer_bearer_id
);
1623 msg_set_msgcnt(&tunnel_hdr
, msgcount
);
1625 if (skb_queue_empty(&l_ptr
->transmq
)) {
1626 skb
= tipc_buf_acquire(INT_H_SIZE
);
1628 skb_copy_to_linear_data(skb
, &tunnel_hdr
, INT_H_SIZE
);
1629 msg_set_size(&tunnel_hdr
, INT_H_SIZE
);
1630 __tipc_link_xmit_skb(tunnel
, skb
);
1632 pr_warn("%sunable to send changeover msg\n",
/* Only split bundles when traffic is spread over two distinct links. */
1638 split_bundles
= (l_ptr
->owner
->active_links
[0] !=
1639 l_ptr
->owner
->active_links
[1]);
1641 skb_queue_walk(&l_ptr
->transmq
, skb
) {
1642 struct tipc_msg
*msg
= buf_msg(skb
);
1644 if ((msg_user(msg
) == MSG_BUNDLER
) && split_bundles
) {
1645 struct tipc_msg
*m
= msg_get_wrapped(msg
);
1646 unchar
*pos
= (unchar
*)m
;
1648 msgcount
= msg_msgcnt(msg
);
1649 while (msgcount
--) {
1650 msg_set_seqno(m
, msg_seqno(msg
));
1651 tipc_link_tunnel_xmit(l_ptr
, &tunnel_hdr
, m
,
1652 msg_link_selector(m
));
1653 pos
+= align(msg_size(m
));
1654 m
= (struct tipc_msg
*)pos
;
1657 tipc_link_tunnel_xmit(l_ptr
, &tunnel_hdr
, msg
,
1658 msg_link_selector(msg
));
/*
 * tipc_link_dup_queue_xmit(): tunnel a DUPLICATE_MSG copy of every packet
 * in @link's transmq (and then backlogq) over the new tunnel link @tnl,
 * refreshing each original's ack fields first. Stops early if @link goes
 * down or an skb allocation fails.
 * NOTE(review): mangled extraction -- loop-restart/goto lines missing from
 * this view; code left byte-identical, comments only.
 */
1663 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1664 * duplicate of the first link's send queue via the new link. This way, we
1665 * are guaranteed that currently queued packets from a socket are delivered
1666 * before future traffic from the same socket, even if this is using the
1667 * new link. The last arriving copy of each duplicate packet is dropped at
1668 * the receiving end by the regular protocol check, so packet cardinality
1669 * and sequence order is preserved per sender/receiver socket pair.
1670 * Owner node is locked.
1672 void tipc_link_dup_queue_xmit(struct tipc_link
*link
,
1673 struct tipc_link
*tnl
)
1675 struct sk_buff
*skb
;
1676 struct tipc_msg tnl_hdr
;
1677 struct sk_buff_head
*queue
= &link
->transmq
;
1680 tipc_msg_init(link_own_addr(link
), &tnl_hdr
, CHANGEOVER_PROTOCOL
,
1681 DUPLICATE_MSG
, INT_H_SIZE
, link
->addr
);
/* Total count covers both queues so the peer knows how many to expect. */
1682 mcnt
= skb_queue_len(&link
->transmq
) + skb_queue_len(&link
->backlogq
);
1683 msg_set_msgcnt(&tnl_hdr
, mcnt
);
1684 msg_set_bearer_id(&tnl_hdr
, link
->peer_bearer_id
);
1687 skb_queue_walk(queue
, skb
) {
1688 struct sk_buff
*outskb
;
1689 struct tipc_msg
*msg
= buf_msg(skb
);
1690 u32 len
= msg_size(msg
);
/* Refresh acks on the original before copying it out. */
1692 msg_set_ack(msg
, mod(link
->next_in_no
- 1));
1693 msg_set_bcast_ack(msg
, link
->owner
->bclink
.last_in
);
1694 msg_set_size(&tnl_hdr
, len
+ INT_H_SIZE
);
1695 outskb
= tipc_buf_acquire(len
+ INT_H_SIZE
);
1696 if (outskb
== NULL
) {
1697 pr_warn("%sunable to send duplicate msg\n",
1701 skb_copy_to_linear_data(outskb
, &tnl_hdr
, INT_H_SIZE
);
1702 skb_copy_to_linear_data_offset(outskb
, INT_H_SIZE
,
1704 __tipc_link_xmit_skb(tnl
, outskb
);
1705 if (!tipc_link_is_up(link
))
/* After the transmit queue, repeat the walk over the backlog queue. */
1708 if (queue
== &link
->backlogq
)
1710 queue
= &link
->backlogq
;
/*
 * tipc_link_dup_rcv(): unwrap a tunnelled DUPLICATE_MSG and feed the inner
 * packet into the normal out-of-sequence handling, which discards it if
 * the original copy already arrived. No-op if @link is down.
 * NOTE(review): mangled extraction -- some lines missing; byte-identical.
 */
1714 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1715 * Owner node is locked.
1717 static void tipc_link_dup_rcv(struct tipc_link
*link
,
1718 struct sk_buff
*skb
)
1720 struct sk_buff
*iskb
;
1723 if (!tipc_link_is_up(link
))
1726 if (!tipc_msg_extract(skb
, &iskb
, &pos
)) {
1727 pr_warn("%sfailed to extract inner dup pkt\n", link_co_err
);
1730 /* Append buffer to deferred queue, if applicable: */
1731 link_handle_out_of_seq_msg(link
, iskb
);
/*
 * tipc_link_failover_rcv(): process one tunnelled ORIGINAL_MSG failover
 * packet for the failed link. Resets the link if still up, latches the
 * expected message count from the first packet, extracts the inner buffer
 * (appending fragments to the reassembly buffer), and deletes the link
 * once all expected messages arrived and LINK_STOPPED is set. Returns the
 * extracted buffer (NULL when nothing is to be delivered).
 * NOTE(review): mangled extraction -- gotos/labels and the final return are
 * missing from this view; code left byte-identical, comments only.
 */
1734 /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
1735 * Owner node is locked.
1737 static struct sk_buff
*tipc_link_failover_rcv(struct tipc_link
*l_ptr
,
1738 struct sk_buff
*t_buf
)
1740 struct tipc_msg
*t_msg
= buf_msg(t_buf
);
1741 struct sk_buff
*buf
= NULL
;
1742 struct tipc_msg
*msg
;
1745 if (tipc_link_is_up(l_ptr
))
1746 tipc_link_reset(l_ptr
);
1748 /* First failover packet? */
1749 if (l_ptr
->exp_msg_count
== START_CHANGEOVER
)
1750 l_ptr
->exp_msg_count
= msg_msgcnt(t_msg
);
1752 /* Should there be an inner packet? */
1753 if (l_ptr
->exp_msg_count
) {
1754 l_ptr
->exp_msg_count
--;
1755 if (!tipc_msg_extract(t_buf
, &buf
, &pos
)) {
1756 pr_warn("%sno inner failover pkt\n", link_co_err
);
/* Drop inner packets older than the reset checkpoint. */
1761 if (less(msg_seqno(msg
), l_ptr
->reset_checkpoint
)) {
1766 if (msg_user(msg
) == MSG_FRAGMENTER
) {
1767 l_ptr
->stats
.recv_fragments
++;
1768 tipc_buf_append(&l_ptr
->reasm_buf
, &buf
);
/* All failover traffic received and link marked stopped: delete it. */
1772 if ((!l_ptr
->exp_msg_count
) && (l_ptr
->flags
& LINK_STOPPED
))
1773 tipc_link_delete(l_ptr
);
/*
 * tipc_link_tunnel_rcv(): dispatch a tunnelled changeover packet to the
 * right per-bearer link: DUPLICATE_MSG goes to tipc_link_dup_rcv(),
 * ORIGINAL_MSG is unwrapped in place via tipc_link_failover_rcv().
 * Returns nonzero iff *buf still holds a packet for upward delivery.
 * NOTE(review): mangled extraction -- some lines (e.g. bearer-id bounds
 * handling bodies) missing; code left byte-identical, comments only.
 */
1777 /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
1778 * via other link as result of a failover (ORIGINAL_MSG) or
1779 * a new active link (DUPLICATE_MSG). Failover packets are
1780 * returned to the active link for delivery upwards.
1781 * Owner node is locked.
1783 static int tipc_link_tunnel_rcv(struct tipc_node
*n_ptr
,
1784 struct sk_buff
**buf
)
1786 struct sk_buff
*t_buf
= *buf
;
1787 struct tipc_link
*l_ptr
;
1788 struct tipc_msg
*t_msg
= buf_msg(t_buf
);
1789 u32 bearer_id
= msg_bearer_id(t_msg
);
1793 if (bearer_id
>= MAX_BEARERS
)
1796 l_ptr
= n_ptr
->links
[bearer_id
];
1800 if (msg_type(t_msg
) == DUPLICATE_MSG
)
1801 tipc_link_dup_rcv(l_ptr
, t_buf
);
1802 else if (msg_type(t_msg
) == ORIGINAL_MSG
)
1803 *buf
= tipc_link_failover_rcv(l_ptr
, t_buf
);
1805 pr_warn("%sunknown tunnel pkt received\n", link_co_err
);
1808 return *buf
!= NULL
;
/*
 * link_set_supervision_props(): apply a new link tolerance @tol, deriving
 * the continuity-check interval (tol/4, capped at 500 ms) and the abort
 * limit from it. Rejects values outside [TIPC_MIN_LINK_TOL,
 * TIPC_MAX_LINK_TOL] (the early-return body is lost in this extraction).
 */
1811 static void link_set_supervision_props(struct tipc_link
*l_ptr
, u32 tol
)
1813 unsigned long intv
= ((tol
/ 4) > 500) ? 500 : tol
/ 4;
1815 if ((tol
< TIPC_MIN_LINK_TOL
) || (tol
> TIPC_MAX_LINK_TOL
))
1818 l_ptr
->tolerance
= tol
;
1819 l_ptr
->cont_intv
= msecs_to_jiffies(intv
);
1820 l_ptr
->abort_limit
= tol
/ (jiffies_to_msecs(l_ptr
->cont_intv
) / 4);
/*
 * tipc_link_set_queue_limits(): set per-importance backlog limits from the
 * window @win -- LOW gets win/2, MEDIUM win, HIGH 1.5*win, CRITICAL 2*win;
 * SYSTEM is bounded by how many publication items fit a bulk message
 * (TIPC_MAX_PUBLICATIONS scaled by items-per-packet).
 */
1823 void tipc_link_set_queue_limits(struct tipc_link
*l
, u32 win
)
1825 int max_bulk
= TIPC_MAX_PUBLICATIONS
/ (l
->max_pkt
/ ITEM_SIZE
);
1828 l
->backlog
[TIPC_LOW_IMPORTANCE
].limit
= win
/ 2;
1829 l
->backlog
[TIPC_MEDIUM_IMPORTANCE
].limit
= win
;
1830 l
->backlog
[TIPC_HIGH_IMPORTANCE
].limit
= win
/ 2 * 3;
1831 l
->backlog
[TIPC_CRITICAL_IMPORTANCE
].limit
= win
* 2;
1832 l
->backlog
[TIPC_SYSTEM_IMPORTANCE
].limit
= max_bulk
;
/*
 * tipc_link_find_owner(): walk the RCU node list, scanning each node's
 * links[] under the node lock for a link whose name matches @link_name;
 * on a hit the bearer index is reported through @bearer_id (the hit body
 * and return are lost in this extraction).
 * NOTE(review): mangled extraction -- code left byte-identical.
 */
1835 /* tipc_link_find_owner - locate owner node of link by link's name
1836 * @net: the applicable net namespace
1837 * @name: pointer to link name string
1838 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1840 * Returns pointer to node owning the link, or 0 if no matching link is found.
1842 static struct tipc_node
*tipc_link_find_owner(struct net
*net
,
1843 const char *link_name
,
1844 unsigned int *bearer_id
)
1846 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1847 struct tipc_link
*l_ptr
;
1848 struct tipc_node
*n_ptr
;
1849 struct tipc_node
*found_node
= NULL
;
1854 list_for_each_entry_rcu(n_ptr
, &tn
->node_list
, list
) {
1855 tipc_node_lock(n_ptr
);
1856 for (i
= 0; i
< MAX_BEARERS
; i
++) {
1857 l_ptr
= n_ptr
->links
[i
];
1858 if (l_ptr
&& !strcmp(l_ptr
->name
, link_name
)) {
1864 tipc_node_unlock(n_ptr
);
/*
 * link_reset_statistics(): zero all link counters, then seed sent_info /
 * recv_info with the current sequence numbers so rate calculations after
 * the reset have a correct baseline.
 */
1874 * link_reset_statistics - reset link statistics
1875 * @l_ptr: pointer to link
1877 static void link_reset_statistics(struct tipc_link
*l_ptr
)
1879 memset(&l_ptr
->stats
, 0, sizeof(l_ptr
->stats
));
1880 l_ptr
->stats
.sent_info
= l_ptr
->next_out_no
;
1881 l_ptr
->stats
.recv_info
= l_ptr
->next_in_no
;
/*
 * link_print(): log a one-line summary of the link (prefix @str, peer
 * address, bearer name) followed by its FSM state (WU/RR/RU/WW -- the
 * state strings themselves are lost in this extraction).
 * NOTE(review): mangled extraction -- code left byte-identical.
 */
1884 static void link_print(struct tipc_link
*l_ptr
, const char *str
)
1886 struct tipc_net
*tn
= net_generic(l_ptr
->owner
->net
, tipc_net_id
);
1887 struct tipc_bearer
*b_ptr
;
1890 b_ptr
= rcu_dereference_rtnl(tn
->bearer_list
[l_ptr
->bearer_id
]);
1892 pr_info("%s Link %x<%s>:", str
, l_ptr
->addr
, b_ptr
->name
);
1895 if (link_working_unknown(l_ptr
))
1897 else if (link_reset_reset(l_ptr
))
1899 else if (link_reset_unknown(l_ptr
))
1901 else if (link_working_working(l_ptr
))
/*
 * tipc_nl_parse_link_prop(): parse the nested TIPC_NLA_PROP_* attributes
 * from @prop into @props using tipc_nl_prop_policy, then range-check
 * priority (<= TIPC_MAX_LINK_PRI), tolerance ([TIPC_MIN_LINK_TOL,
 * TIPC_MAX_LINK_TOL]) and window ([TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN]).
 * NOTE(review): mangled extraction -- the error-return statements are lost;
 * code left byte-identical, comments only.
 */
1907 /* Parse and validate nested (link) properties valid for media, bearer and link
1909 int tipc_nl_parse_link_prop(struct nlattr
*prop
, struct nlattr
*props
[])
1913 err
= nla_parse_nested(props
, TIPC_NLA_PROP_MAX
, prop
,
1914 tipc_nl_prop_policy
);
1918 if (props
[TIPC_NLA_PROP_PRIO
]) {
1921 prio
= nla_get_u32(props
[TIPC_NLA_PROP_PRIO
]);
1922 if (prio
> TIPC_MAX_LINK_PRI
)
1926 if (props
[TIPC_NLA_PROP_TOL
]) {
1929 tol
= nla_get_u32(props
[TIPC_NLA_PROP_TOL
]);
1930 if ((tol
< TIPC_MIN_LINK_TOL
) || (tol
> TIPC_MAX_LINK_TOL
))
1934 if (props
[TIPC_NLA_PROP_WIN
]) {
1937 win
= nla_get_u32(props
[TIPC_NLA_PROP_WIN
]);
1938 if ((win
< TIPC_MIN_LINK_WIN
) || (win
> TIPC_MAX_LINK_WIN
))
/*
 * tipc_nl_link_set(): netlink handler that updates a link's tolerance,
 * priority and/or window from TIPC_NLA_LINK_PROP attributes. Looks up the
 * owning node by link name, applies each present property under the node
 * lock, and sends a STATE_MSG to propagate tolerance/priority changes to
 * the peer.
 * NOTE(review): mangled extraction -- error returns and some declarations
 * are lost from this view; code left byte-identical, comments only.
 */
1945 int tipc_nl_link_set(struct sk_buff
*skb
, struct genl_info
*info
)
1951 struct tipc_link
*link
;
1952 struct tipc_node
*node
;
1953 struct nlattr
*attrs
[TIPC_NLA_LINK_MAX
+ 1];
1954 struct net
*net
= sock_net(skb
->sk
);
1956 if (!info
->attrs
[TIPC_NLA_LINK
])
1959 err
= nla_parse_nested(attrs
, TIPC_NLA_LINK_MAX
,
1960 info
->attrs
[TIPC_NLA_LINK
],
1961 tipc_nl_link_policy
);
1965 if (!attrs
[TIPC_NLA_LINK_NAME
])
1968 name
= nla_data(attrs
[TIPC_NLA_LINK_NAME
]);
1970 node
= tipc_link_find_owner(net
, name
, &bearer_id
);
1974 tipc_node_lock(node
);
1976 link
= node
->links
[bearer_id
];
1982 if (attrs
[TIPC_NLA_LINK_PROP
]) {
1983 struct nlattr
*props
[TIPC_NLA_PROP_MAX
+ 1];
1985 err
= tipc_nl_parse_link_prop(attrs
[TIPC_NLA_LINK_PROP
],
/* Tolerance change is also advertised to the peer via STATE_MSG. */
1992 if (props
[TIPC_NLA_PROP_TOL
]) {
1995 tol
= nla_get_u32(props
[TIPC_NLA_PROP_TOL
]);
1996 link_set_supervision_props(link
, tol
);
1997 tipc_link_proto_xmit(link
, STATE_MSG
, 0, 0, tol
, 0, 0);
1999 if (props
[TIPC_NLA_PROP_PRIO
]) {
2002 prio
= nla_get_u32(props
[TIPC_NLA_PROP_PRIO
]);
2003 link
->priority
= prio
;
2004 tipc_link_proto_xmit(link
, STATE_MSG
, 0, 0, 0, prio
, 0);
2006 if (props
[TIPC_NLA_PROP_WIN
]) {
2009 win
= nla_get_u32(props
[TIPC_NLA_PROP_WIN
]);
2010 tipc_link_set_queue_limits(link
, win
);
2015 tipc_node_unlock(node
);
/*
 * __tipc_nl_add_stats(): emit a TIPC_NLA_LINK_STATS nested attribute
 * containing every link counter as u32, via a key/value table walked with
 * ARRAY_SIZE. Averages (MSG_PROF_TOT divisor, AVG_QUEUE) guard against
 * division by zero. On put failure the nest is cancelled.
 * NOTE(review): mangled extraction -- struct nla_map definition, returns
 * and labels are lost from this view; code left byte-identical.
 */
2020 static int __tipc_nl_add_stats(struct sk_buff
*skb
, struct tipc_stats
*s
)
2023 struct nlattr
*stats
;
2030 struct nla_map map
[] = {
2031 {TIPC_NLA_STATS_RX_INFO
, s
->recv_info
},
2032 {TIPC_NLA_STATS_RX_FRAGMENTS
, s
->recv_fragments
},
2033 {TIPC_NLA_STATS_RX_FRAGMENTED
, s
->recv_fragmented
},
2034 {TIPC_NLA_STATS_RX_BUNDLES
, s
->recv_bundles
},
2035 {TIPC_NLA_STATS_RX_BUNDLED
, s
->recv_bundled
},
2036 {TIPC_NLA_STATS_TX_INFO
, s
->sent_info
},
2037 {TIPC_NLA_STATS_TX_FRAGMENTS
, s
->sent_fragments
},
2038 {TIPC_NLA_STATS_TX_FRAGMENTED
, s
->sent_fragmented
},
2039 {TIPC_NLA_STATS_TX_BUNDLES
, s
->sent_bundles
},
2040 {TIPC_NLA_STATS_TX_BUNDLED
, s
->sent_bundled
},
2041 {TIPC_NLA_STATS_MSG_PROF_TOT
, (s
->msg_length_counts
) ?
2042 s
->msg_length_counts
: 1},
2043 {TIPC_NLA_STATS_MSG_LEN_CNT
, s
->msg_length_counts
},
2044 {TIPC_NLA_STATS_MSG_LEN_TOT
, s
->msg_lengths_total
},
2045 {TIPC_NLA_STATS_MSG_LEN_P0
, s
->msg_length_profile
[0]},
2046 {TIPC_NLA_STATS_MSG_LEN_P1
, s
->msg_length_profile
[1]},
2047 {TIPC_NLA_STATS_MSG_LEN_P2
, s
->msg_length_profile
[2]},
2048 {TIPC_NLA_STATS_MSG_LEN_P3
, s
->msg_length_profile
[3]},
2049 {TIPC_NLA_STATS_MSG_LEN_P4
, s
->msg_length_profile
[4]},
2050 {TIPC_NLA_STATS_MSG_LEN_P5
, s
->msg_length_profile
[5]},
2051 {TIPC_NLA_STATS_MSG_LEN_P6
, s
->msg_length_profile
[6]},
2052 {TIPC_NLA_STATS_RX_STATES
, s
->recv_states
},
2053 {TIPC_NLA_STATS_RX_PROBES
, s
->recv_probes
},
2054 {TIPC_NLA_STATS_RX_NACKS
, s
->recv_nacks
},
2055 {TIPC_NLA_STATS_RX_DEFERRED
, s
->deferred_recv
},
2056 {TIPC_NLA_STATS_TX_STATES
, s
->sent_states
},
2057 {TIPC_NLA_STATS_TX_PROBES
, s
->sent_probes
},
2058 {TIPC_NLA_STATS_TX_NACKS
, s
->sent_nacks
},
2059 {TIPC_NLA_STATS_TX_ACKS
, s
->sent_acks
},
2060 {TIPC_NLA_STATS_RETRANSMITTED
, s
->retransmitted
},
2061 {TIPC_NLA_STATS_DUPLICATES
, s
->duplicates
},
2062 {TIPC_NLA_STATS_LINK_CONGS
, s
->link_congs
},
2063 {TIPC_NLA_STATS_MAX_QUEUE
, s
->max_queue_sz
},
2064 {TIPC_NLA_STATS_AVG_QUEUE
, s
->queue_sz_counts
?
2065 (s
->accu_queue_sz
/ s
->queue_sz_counts
) : 0}
2068 stats
= nla_nest_start(skb
, TIPC_NLA_LINK_STATS
);
2072 for (i
= 0; i
< ARRAY_SIZE(map
); i
++)
2073 if (nla_put_u32(skb
, map
[i
].key
, map
[i
].val
))
2076 nla_nest_end(skb
, stats
);
/* Error path: undo the partially-built nest. */
2080 nla_nest_cancel(skb
, stats
);
/*
 * __tipc_nl_add_link(): serialize one link into @msg as a TIPC_NLA_LINK
 * nest -- name, destination cluster, MTU, rx/tx counters, up/active flags,
 * a TIPC_NLA_LINK_PROP sub-nest (prio, tolerance, window) and the stats
 * nest -- with staged cancel labels on failure.
 * NOTE(review): mangled extraction -- goto targets, returns and some
 * argument lines are lost from this view; code left byte-identical.
 */
2085 /* Caller should hold appropriate locks to protect the link */
2086 static int __tipc_nl_add_link(struct net
*net
, struct tipc_nl_msg
*msg
,
2087 struct tipc_link
*link
)
2091 struct nlattr
*attrs
;
2092 struct nlattr
*prop
;
2093 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
2095 hdr
= genlmsg_put(msg
->skb
, msg
->portid
, msg
->seq
, &tipc_genl_family
,
2096 NLM_F_MULTI
, TIPC_NL_LINK_GET
);
2100 attrs
= nla_nest_start(msg
->skb
, TIPC_NLA_LINK
);
2104 if (nla_put_string(msg
->skb
, TIPC_NLA_LINK_NAME
, link
->name
))
2106 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_DEST
,
2107 tipc_cluster_mask(tn
->own_addr
)))
2109 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_MTU
, link
->max_pkt
))
2111 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_RX
, link
->next_in_no
))
2113 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_TX
, link
->next_out_no
))
2116 if (tipc_link_is_up(link
))
2117 if (nla_put_flag(msg
->skb
, TIPC_NLA_LINK_UP
))
2119 if (tipc_link_is_active(link
))
2120 if (nla_put_flag(msg
->skb
, TIPC_NLA_LINK_ACTIVE
))
2123 prop
= nla_nest_start(msg
->skb
, TIPC_NLA_LINK_PROP
);
2126 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_PRIO
, link
->priority
))
2128 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_TOL
, link
->tolerance
))
2130 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_WIN
,
2133 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_PRIO
, link
->priority
))
2135 nla_nest_end(msg
->skb
, prop
);
2137 err
= __tipc_nl_add_stats(msg
->skb
, &link
->stats
);
2141 nla_nest_end(msg
->skb
, attrs
);
2142 genlmsg_end(msg
->skb
, hdr
);
/* Staged error unwinding: cancel prop nest, attrs nest, then message. */
2147 nla_nest_cancel(msg
->skb
, prop
);
2149 nla_nest_cancel(msg
->skb
, attrs
);
2151 genlmsg_cancel(msg
->skb
, hdr
);
/*
 * __tipc_nl_add_node_links(): dump all of @node's links starting at index
 * *prev_link, skipping empty slots; *prev_link acts as a resume cursor for
 * multi-part netlink dumps (the error handling / cursor-update lines are
 * lost in this extraction).
 */
2156 /* Caller should hold node lock */
2157 static int __tipc_nl_add_node_links(struct net
*net
, struct tipc_nl_msg
*msg
,
2158 struct tipc_node
*node
, u32
*prev_link
)
2163 for (i
= *prev_link
; i
< MAX_BEARERS
; i
++) {
2166 if (!node
->links
[i
])
2169 err
= __tipc_nl_add_link(net
, msg
, node
->links
[i
]);
/*
 * tipc_nl_link_dump(): netlink dump callback for links. Resumes from the
 * node/link cursors saved in cb->args, continuing an interrupted walk from
 * prev_node or starting fresh with the broadcast link followed by every
 * node's links; saves the cursors back into cb->args before returning.
 * NOTE(review): mangled extraction -- rcu_read_lock/unlock, early returns
 * and some loop arguments are lost from this view; code byte-identical.
 */
2178 int tipc_nl_link_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2180 struct net
*net
= sock_net(skb
->sk
);
2181 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
2182 struct tipc_node
*node
;
2183 struct tipc_nl_msg msg
;
2184 u32 prev_node
= cb
->args
[0];
2185 u32 prev_link
= cb
->args
[1];
2186 int done
= cb
->args
[2];
2193 msg
.portid
= NETLINK_CB(cb
->skb
).portid
;
2194 msg
.seq
= cb
->nlh
->nlmsg_seq
;
/* Resume path: continue from the node we stopped at last time. */
2199 node
= tipc_node_find(net
, prev_node
);
2201 /* We never set seq or call nl_dump_check_consistent()
2202 * this means that setting prev_seq here will cause the
2203 * consistence check to fail in the netlink callback
2204 * handler. Resulting in the last NLMSG_DONE message
2205 * having the NLM_F_DUMP_INTR flag set.
2211 list_for_each_entry_continue_rcu(node
, &tn
->node_list
,
2213 tipc_node_lock(node
);
2214 err
= __tipc_nl_add_node_links(net
, &msg
, node
,
2216 tipc_node_unlock(node
);
2220 prev_node
= node
->addr
;
/* Fresh dump: broadcast link first, then all nodes. */
2223 err
= tipc_nl_add_bc_link(net
, &msg
);
2227 list_for_each_entry_rcu(node
, &tn
->node_list
, list
) {
2228 tipc_node_lock(node
);
2229 err
= __tipc_nl_add_node_links(net
, &msg
, node
,
2231 tipc_node_unlock(node
);
2235 prev_node
= node
->addr
;
2242 cb
->args
[0] = prev_node
;
2243 cb
->args
[1] = prev_link
;
/*
 * tipc_nl_link_get(): netlink handler returning a single link, looked up
 * by TIPC_NLA_LINK_NAME. Allocates a reply skb, serializes the link via
 * __tipc_nl_add_link() under the node lock, and replies with
 * genlmsg_reply(); the error path unlocks and frees the reply skb.
 * NOTE(review): mangled extraction -- error returns/labels are lost from
 * this view; code left byte-identical, comments only.
 */
2249 int tipc_nl_link_get(struct sk_buff
*skb
, struct genl_info
*info
)
2251 struct net
*net
= genl_info_net(info
);
2252 struct sk_buff
*ans_skb
;
2253 struct tipc_nl_msg msg
;
2254 struct tipc_link
*link
;
2255 struct tipc_node
*node
;
2260 if (!info
->attrs
[TIPC_NLA_LINK_NAME
])
2263 name
= nla_data(info
->attrs
[TIPC_NLA_LINK_NAME
]);
2264 node
= tipc_link_find_owner(net
, name
, &bearer_id
);
2268 ans_skb
= nlmsg_new(NLMSG_GOODSIZE
, GFP_KERNEL
);
2273 msg
.portid
= info
->snd_portid
;
2274 msg
.seq
= info
->snd_seq
;
2276 tipc_node_lock(node
);
2277 link
= node
->links
[bearer_id
];
2283 err
= __tipc_nl_add_link(net
, &msg
, link
);
2287 tipc_node_unlock(node
);
2289 return genlmsg_reply(ans_skb
, info
);
/* Error path: release node lock and free the unused reply buffer. */
2292 tipc_node_unlock(node
);
2293 nlmsg_free(ans_skb
);
2298 int tipc_nl_link_reset_stats(struct sk_buff
*skb
, struct genl_info
*info
)
2302 unsigned int bearer_id
;
2303 struct tipc_link
*link
;
2304 struct tipc_node
*node
;
2305 struct nlattr
*attrs
[TIPC_NLA_LINK_MAX
+ 1];
2306 struct net
*net
= sock_net(skb
->sk
);
2308 if (!info
->attrs
[TIPC_NLA_LINK
])
2311 err
= nla_parse_nested(attrs
, TIPC_NLA_LINK_MAX
,
2312 info
->attrs
[TIPC_NLA_LINK
],
2313 tipc_nl_link_policy
);
2317 if (!attrs
[TIPC_NLA_LINK_NAME
])
2320 link_name
= nla_data(attrs
[TIPC_NLA_LINK_NAME
]);
2322 if (strcmp(link_name
, tipc_bclink_name
) == 0) {
2323 err
= tipc_bclink_reset_stats(net
);
2329 node
= tipc_link_find_owner(net
, link_name
, &bearer_id
);
2333 tipc_node_lock(node
);
2335 link
= node
->links
[bearer_id
];
2337 tipc_node_unlock(node
);
2341 link_reset_statistics(link
);
2343 tipc_node_unlock(node
);