1 /*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45
46 #include <linux/pkt_sched.h>
47
48 /*
49 * Error message prefixes
50 */
51 static const char *link_co_err = "Link changeover error, ";
52 static const char *link_rst_msg = "Resetting link ";
53 static const char *link_unk_evt = "Unknown link event ";
54
55 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
56 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
57 [TIPC_NLA_LINK_NAME] = {
58 .type = NLA_STRING,
59 .len = TIPC_MAX_LINK_NAME
60 },
61 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
62 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
63 [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
64 [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
65 [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
66 [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
67 [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
68 [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
69 };
70
 71 /* Properties valid for media, bearer and link */
72 static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
73 [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
74 [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
75 [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
76 [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
77 };
78
79 /*
80 * Out-of-range value for link session numbers
81 */
82 #define INVALID_SESSION 0x10000
83
84 /*
85 * Link state events:
86 */
87 #define STARTING_EVT 856384768 /* link processing trigger */
 88 #define  TRAFFIC_MSG_EVT 560815u	/* traffic msg received */
89 #define TIMEOUT_EVT 560817u /* link timer expired */
90
91 /*
92 * State value stored in 'failover_pkts'
93 */
94 #define FIRST_FAILOVER 0xffffu
95
96 static void link_handle_out_of_seq_msg(struct tipc_link *link,
97 struct sk_buff *skb);
98 static void tipc_link_proto_rcv(struct tipc_link *link,
99 struct sk_buff *skb);
100 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
101 static void link_state_event(struct tipc_link *l_ptr, u32 event);
102 static void link_reset_statistics(struct tipc_link *l_ptr);
103 static void link_print(struct tipc_link *l_ptr, const char *str);
104 static void tipc_link_sync_xmit(struct tipc_link *l);
105 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
106 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
107 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
108 static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
109 /*
110 * Simple link routines
111 */
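/* align(): round a length up to the next 4-byte boundary; used when stepping
 * through messages packed into bundle buffers
 */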
112 static unsigned int align(unsigned int i)
113 {
114 return (i + 3) & ~3u;
115 }
116
117 static void tipc_link_release(struct kref *kref)
118 {
119 kfree(container_of(kref, struct tipc_link, ref));
120 }
121
122 static void tipc_link_get(struct tipc_link *l_ptr)
123 {
124 kref_get(&l_ptr->ref);
125 }
126
127 static void tipc_link_put(struct tipc_link *l_ptr)
128 {
129 kref_put(&l_ptr->ref, tipc_link_release);
130 }
131
132 static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
133 {
134 if (l->owner->active_links[0] != l)
135 return l->owner->active_links[0];
136 return l->owner->active_links[1];
137 }
138
139 /*
140 * Simple non-static link routines (i.e. referenced outside this file)
141 */
142 int tipc_link_is_up(struct tipc_link *l_ptr)
143 {
144 if (!l_ptr)
145 return 0;
146 return link_working_working(l_ptr) || link_working_unknown(l_ptr);
147 }
148
149 int tipc_link_is_active(struct tipc_link *l_ptr)
150 {
151 return (l_ptr->owner->active_links[0] == l_ptr) ||
152 (l_ptr->owner->active_links[1] == l_ptr);
153 }
154
155 /**
156 * link_timeout - handle expiration of link timer
157  * @data: pointer to link, passed as an unsigned long by the timer core
158 */
159 static void link_timeout(unsigned long data)
160 {
161 struct tipc_link *l_ptr = (struct tipc_link *)data;
162 struct sk_buff *skb;
163
164 tipc_node_lock(l_ptr->owner);
165
166 /* update counters used in statistical profiling of send traffic */
167 l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
168 l_ptr->stats.queue_sz_counts++;
169
170 skb = skb_peek(&l_ptr->transmq);
171 if (skb) {
172 struct tipc_msg *msg = buf_msg(skb);
173 u32 length = msg_size(msg);
174
175 if ((msg_user(msg) == MSG_FRAGMENTER) &&
176 (msg_type(msg) == FIRST_FRAGMENT)) {
177 length = msg_size(msg_get_wrapped(msg));
178 }
179 if (length) {
180 l_ptr->stats.msg_lengths_total += length;
181 l_ptr->stats.msg_length_counts++;
182 if (length <= 64)
183 l_ptr->stats.msg_length_profile[0]++;
184 else if (length <= 256)
185 l_ptr->stats.msg_length_profile[1]++;
186 else if (length <= 1024)
187 l_ptr->stats.msg_length_profile[2]++;
188 else if (length <= 4096)
189 l_ptr->stats.msg_length_profile[3]++;
190 else if (length <= 16384)
191 l_ptr->stats.msg_length_profile[4]++;
192 else if (length <= 32768)
193 l_ptr->stats.msg_length_profile[5]++;
194 else
195 l_ptr->stats.msg_length_profile[6]++;
196 }
197 }
198
199 /* do all other link processing performed on a periodic basis */
200 link_state_event(l_ptr, TIMEOUT_EVT);
201
202 if (skb_queue_len(&l_ptr->backlogq))
203 tipc_link_push_packets(l_ptr);
204
205 tipc_node_unlock(l_ptr->owner);
206 tipc_link_put(l_ptr);
207 }
208
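/* link_set_timer(): (re)arm the link timer; mod_timer() returns 0 when the
 * timer was not already pending, in which case an extra link reference is
 * taken so the timer callback can safely drop it on expiry
 */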
209 static void link_set_timer(struct tipc_link *link, unsigned long time)
210 {
211 if (!mod_timer(&link->timer, jiffies + time))
212 tipc_link_get(link);
213 }
214
215 /**
216 * tipc_link_create - create a new link
217 * @n_ptr: pointer to associated node
218 * @b_ptr: pointer to associated bearer
219 * @media_addr: media address to use when sending messages over link
220 *
221  * Returns pointer to link, or NULL on error.
222 */
223 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
224 struct tipc_bearer *b_ptr,
225 const struct tipc_media_addr *media_addr)
226 {
227 struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
228 struct tipc_link *l_ptr;
229 struct tipc_msg *msg;
230 char *if_name;
231 char addr_string[16];
232 u32 peer = n_ptr->addr;
233
234 if (n_ptr->link_cnt >= MAX_BEARERS) {
235 tipc_addr_string_fill(addr_string, n_ptr->addr);
236 pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
237 n_ptr->link_cnt, addr_string, MAX_BEARERS);
238 return NULL;
239 }
240
241 if (n_ptr->links[b_ptr->identity]) {
242 tipc_addr_string_fill(addr_string, n_ptr->addr);
243 pr_err("Attempt to establish second link on <%s> to %s\n",
244 b_ptr->name, addr_string);
245 return NULL;
246 }
247
248 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
249 if (!l_ptr) {
250 pr_warn("Link creation failed, no memory\n");
251 return NULL;
252 }
253 kref_init(&l_ptr->ref);
254 l_ptr->addr = peer;
255 if_name = strchr(b_ptr->name, ':') + 1;
256 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
257 tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
258 tipc_node(tn->own_addr),
259 if_name,
260 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
261 /* note: peer i/f name is updated by reset/activate message */
262 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
263 l_ptr->owner = n_ptr;
264 l_ptr->checkpoint = 1;
265 l_ptr->peer_session = INVALID_SESSION;
266 l_ptr->bearer_id = b_ptr->identity;
267 link_set_supervision_props(l_ptr, b_ptr->tolerance);
268 l_ptr->state = RESET_UNKNOWN;
269
270 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
271 msg = l_ptr->pmsg;
272 tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
273 l_ptr->addr);
274 msg_set_size(msg, sizeof(l_ptr->proto_msg));
275 msg_set_session(msg, (tn->random & 0xffff));
276 msg_set_bearer_id(msg, b_ptr->identity);
277 strcpy((char *)msg_data(msg), if_name);
278 l_ptr->net_plane = b_ptr->net_plane;
279 l_ptr->advertised_mtu = b_ptr->mtu;
280 l_ptr->mtu = l_ptr->advertised_mtu;
281 l_ptr->priority = b_ptr->priority;
282 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
283 l_ptr->next_out_no = 1;
284 __skb_queue_head_init(&l_ptr->transmq);
285 __skb_queue_head_init(&l_ptr->backlogq);
286 __skb_queue_head_init(&l_ptr->deferdq);
287 skb_queue_head_init(&l_ptr->wakeupq);
288 skb_queue_head_init(&l_ptr->inputq);
289 skb_queue_head_init(&l_ptr->namedq);
290 link_reset_statistics(l_ptr);
291 tipc_node_attach_link(n_ptr, l_ptr);
292 setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
293 link_state_event(l_ptr, STARTING_EVT);
294
295 return l_ptr;
296 }
297
298 /**
299 * tipc_link_delete - Delete a link
300 * @l: link to be deleted
301 */
302 void tipc_link_delete(struct tipc_link *l)
303 {
304 tipc_link_reset(l);
305 if (del_timer(&l->timer))
306 tipc_link_put(l);
307 l->flags |= LINK_STOPPED;
308 /* Delete link now, or when timer is finished: */
309 tipc_link_reset_fragments(l);
310 tipc_node_detach_link(l->owner, l);
311 tipc_link_put(l);
312 }
313
314 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
315 bool shutting_down)
316 {
317 struct tipc_net *tn = net_generic(net, tipc_net_id);
318 struct tipc_link *link;
319 struct tipc_node *node;
320
321 rcu_read_lock();
322 list_for_each_entry_rcu(node, &tn->node_list, list) {
323 tipc_node_lock(node);
324 link = node->links[bearer_id];
325 if (link)
326 tipc_link_delete(link);
327 tipc_node_unlock(node);
328 }
329 rcu_read_unlock();
330 }
331
332 /**
333 * link_schedule_user - schedule a message sender for wakeup after congestion
334 * @link: congested link
335  * @list: chain of buffers containing the message that could not be sent
336 * Create pseudo msg to send back to user when congestion abates
337 * Only consumes message if there is an error
338 */
339 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
340 {
341 struct tipc_msg *msg = buf_msg(skb_peek(list));
342 int imp = msg_importance(msg);
343 u32 oport = msg_origport(msg);
344 u32 addr = link_own_addr(link);
345 struct sk_buff *skb;
346
347 /* This really cannot happen... */
348 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
349 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
350 tipc_link_reset(link);
351 goto err;
352 }
353 /* Non-blocking sender: */
354 if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
355 return -ELINKCONG;
356
357 /* Create and schedule wakeup pseudo message */
358 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
359 addr, addr, oport, 0, 0);
360 if (!skb)
361 goto err;
362 TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
363 TIPC_SKB_CB(skb)->chain_imp = imp;
364 skb_queue_tail(&link->wakeupq, skb);
365 link->stats.link_congs++;
366 return -ELINKCONG;
367 err:
368 __skb_queue_purge(list);
369 return -ENOBUFS;
370 }
371
372 /**
373 * link_prepare_wakeup - prepare users for wakeup after congestion
374  * @l: congested link
375 * Move a number of waiting users, as permitted by available space in
376 * the send queue, from link wait queue to node wait queue for wakeup
377 */
378 void link_prepare_wakeup(struct tipc_link *l)
379 {
380 int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
381 int imp, lim;
382 struct sk_buff *skb, *tmp;
383
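	/* Wake pending senders in FIFO order, but stop once the backlog plus
	 * the packets about to be released would exceed the limit for a
	 * given importance level
	 */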
384 skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
385 imp = TIPC_SKB_CB(skb)->chain_imp;
386 lim = l->window + l->backlog[imp].limit;
387 pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
388 if ((pnd[imp] + l->backlog[imp].len) >= lim)
389 break;
390 skb_unlink(skb, &l->wakeupq);
391 skb_queue_tail(&l->inputq, skb);
392 l->owner->inputq = &l->inputq;
393 l->owner->action_flags |= TIPC_MSG_EVT;
394 }
395 }
396
397 /**
398 * tipc_link_reset_fragments - purge link's inbound message fragments queue
399 * @l_ptr: pointer to link
400 */
401 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
402 {
403 kfree_skb(l_ptr->reasm_buf);
404 l_ptr->reasm_buf = NULL;
405 }
406
407 static void tipc_link_purge_backlog(struct tipc_link *l)
408 {
409 __skb_queue_purge(&l->backlogq);
410 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
411 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
412 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
413 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
414 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
415 }
416
417 /**
418 * tipc_link_purge_queues - purge all pkt queues associated with link
419 * @l_ptr: pointer to link
420 */
421 void tipc_link_purge_queues(struct tipc_link *l_ptr)
422 {
423 __skb_queue_purge(&l_ptr->deferdq);
424 __skb_queue_purge(&l_ptr->transmq);
425 tipc_link_purge_backlog(l_ptr);
426 tipc_link_reset_fragments(l_ptr);
427 }
428
429 void tipc_link_reset(struct tipc_link *l_ptr)
430 {
431 u32 prev_state = l_ptr->state;
432 int was_active_link = tipc_link_is_active(l_ptr);
433 struct tipc_node *owner = l_ptr->owner;
434 struct tipc_link *pl = tipc_parallel_link(l_ptr);
435
436 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
437
438 /* Link is down, accept any session */
439 l_ptr->peer_session = INVALID_SESSION;
440
441 /* Prepare for renewed mtu size negotiation */
442 l_ptr->mtu = l_ptr->advertised_mtu;
443
444 l_ptr->state = RESET_UNKNOWN;
445
446 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
447 return;
448
449 tipc_node_link_down(l_ptr->owner, l_ptr);
450 tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
451
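	/* If this was an active link and a parallel link is still up, enter
	 * failover state and let the parallel link take over reassembly of
	 * any partially received fragmented message
	 */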
452 if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
453 l_ptr->flags |= LINK_FAILINGOVER;
454 l_ptr->failover_checkpt = l_ptr->next_in_no;
455 pl->failover_pkts = FIRST_FAILOVER;
456 pl->failover_checkpt = l_ptr->next_in_no;
457 pl->failover_skb = l_ptr->reasm_buf;
458 } else {
459 kfree_skb(l_ptr->reasm_buf);
460 }
461 /* Clean up all queues, except inputq: */
462 __skb_queue_purge(&l_ptr->transmq);
463 __skb_queue_purge(&l_ptr->deferdq);
464 if (!owner->inputq)
465 owner->inputq = &l_ptr->inputq;
466 skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
467 if (!skb_queue_empty(owner->inputq))
468 owner->action_flags |= TIPC_MSG_EVT;
469 tipc_link_purge_backlog(l_ptr);
470 l_ptr->reasm_buf = NULL;
471 l_ptr->rcv_unacked = 0;
472 l_ptr->checkpoint = 1;
473 l_ptr->next_out_no = 1;
474 l_ptr->fsm_msg_cnt = 0;
475 l_ptr->stale_count = 0;
476 link_reset_statistics(l_ptr);
477 }
478
479 void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
480 {
481 struct tipc_net *tn = net_generic(net, tipc_net_id);
482 struct tipc_link *l_ptr;
483 struct tipc_node *n_ptr;
484
485 rcu_read_lock();
486 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
487 tipc_node_lock(n_ptr);
488 l_ptr = n_ptr->links[bearer_id];
489 if (l_ptr)
490 tipc_link_reset(l_ptr);
491 tipc_node_unlock(n_ptr);
492 }
493 rcu_read_unlock();
494 }
495
496 static void link_activate(struct tipc_link *link)
497 {
498 struct tipc_node *node = link->owner;
499
500 link->next_in_no = 1;
501 link->stats.recv_info = 1;
502 tipc_node_link_up(node, link);
503 tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
504 }
505
506 /**
507 * link_state_event - link finite state machine
508 * @l_ptr: pointer to link
509 * @event: state machine event to process
510 */
511 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
512 {
513 struct tipc_link *other;
514 unsigned long cont_intv = l_ptr->cont_intv;
515
516 if (l_ptr->flags & LINK_STOPPED)
517 return;
518
519 if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
520 return; /* Not yet. */
521
522 if (l_ptr->flags & LINK_FAILINGOVER) {
523 if (event == TIMEOUT_EVT)
524 link_set_timer(l_ptr, cont_intv);
525 return;
526 }
527
528 switch (l_ptr->state) {
529 case WORKING_WORKING:
530 switch (event) {
531 case TRAFFIC_MSG_EVT:
532 case ACTIVATE_MSG:
533 break;
534 case TIMEOUT_EVT:
535 if (l_ptr->next_in_no != l_ptr->checkpoint) {
536 l_ptr->checkpoint = l_ptr->next_in_no;
537 if (tipc_bclink_acks_missing(l_ptr->owner)) {
538 tipc_link_proto_xmit(l_ptr, STATE_MSG,
539 0, 0, 0, 0);
540 l_ptr->fsm_msg_cnt++;
541 }
542 link_set_timer(l_ptr, cont_intv);
543 break;
544 }
545 l_ptr->state = WORKING_UNKNOWN;
546 l_ptr->fsm_msg_cnt = 0;
547 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
548 l_ptr->fsm_msg_cnt++;
549 link_set_timer(l_ptr, cont_intv / 4);
550 break;
551 case RESET_MSG:
552 pr_debug("%s<%s>, requested by peer\n",
553 link_rst_msg, l_ptr->name);
554 tipc_link_reset(l_ptr);
555 l_ptr->state = RESET_RESET;
556 l_ptr->fsm_msg_cnt = 0;
557 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
558 0, 0, 0, 0);
559 l_ptr->fsm_msg_cnt++;
560 link_set_timer(l_ptr, cont_intv);
561 break;
562 default:
563 pr_debug("%s%u in WW state\n", link_unk_evt, event);
564 }
565 break;
566 case WORKING_UNKNOWN:
567 switch (event) {
568 case TRAFFIC_MSG_EVT:
569 case ACTIVATE_MSG:
570 l_ptr->state = WORKING_WORKING;
571 l_ptr->fsm_msg_cnt = 0;
572 link_set_timer(l_ptr, cont_intv);
573 break;
574 case RESET_MSG:
575 pr_debug("%s<%s>, requested by peer while probing\n",
576 link_rst_msg, l_ptr->name);
577 tipc_link_reset(l_ptr);
578 l_ptr->state = RESET_RESET;
579 l_ptr->fsm_msg_cnt = 0;
580 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
581 0, 0, 0, 0);
582 l_ptr->fsm_msg_cnt++;
583 link_set_timer(l_ptr, cont_intv);
584 break;
585 case TIMEOUT_EVT:
586 if (l_ptr->next_in_no != l_ptr->checkpoint) {
587 l_ptr->state = WORKING_WORKING;
588 l_ptr->fsm_msg_cnt = 0;
589 l_ptr->checkpoint = l_ptr->next_in_no;
590 if (tipc_bclink_acks_missing(l_ptr->owner)) {
591 tipc_link_proto_xmit(l_ptr, STATE_MSG,
592 0, 0, 0, 0);
593 l_ptr->fsm_msg_cnt++;
594 }
595 link_set_timer(l_ptr, cont_intv);
596 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
597 tipc_link_proto_xmit(l_ptr, STATE_MSG,
598 1, 0, 0, 0);
599 l_ptr->fsm_msg_cnt++;
600 link_set_timer(l_ptr, cont_intv / 4);
601 } else { /* Link has failed */
602 pr_debug("%s<%s>, peer not responding\n",
603 link_rst_msg, l_ptr->name);
604 tipc_link_reset(l_ptr);
605 l_ptr->state = RESET_UNKNOWN;
606 l_ptr->fsm_msg_cnt = 0;
607 tipc_link_proto_xmit(l_ptr, RESET_MSG,
608 0, 0, 0, 0);
609 l_ptr->fsm_msg_cnt++;
610 link_set_timer(l_ptr, cont_intv);
611 }
612 break;
613 default:
614 pr_err("%s%u in WU state\n", link_unk_evt, event);
615 }
616 break;
617 case RESET_UNKNOWN:
618 switch (event) {
619 case TRAFFIC_MSG_EVT:
620 break;
621 case ACTIVATE_MSG:
622 other = l_ptr->owner->active_links[0];
623 if (other && link_working_unknown(other))
624 break;
625 l_ptr->state = WORKING_WORKING;
626 l_ptr->fsm_msg_cnt = 0;
627 link_activate(l_ptr);
628 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
629 l_ptr->fsm_msg_cnt++;
630 if (l_ptr->owner->working_links == 1)
631 tipc_link_sync_xmit(l_ptr);
632 link_set_timer(l_ptr, cont_intv);
633 break;
634 case RESET_MSG:
635 l_ptr->state = RESET_RESET;
636 l_ptr->fsm_msg_cnt = 0;
637 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
638 1, 0, 0, 0);
639 l_ptr->fsm_msg_cnt++;
640 link_set_timer(l_ptr, cont_intv);
641 break;
642 case STARTING_EVT:
643 l_ptr->flags |= LINK_STARTED;
644 l_ptr->fsm_msg_cnt++;
645 link_set_timer(l_ptr, cont_intv);
646 break;
647 case TIMEOUT_EVT:
648 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
649 l_ptr->fsm_msg_cnt++;
650 link_set_timer(l_ptr, cont_intv);
651 break;
652 default:
653 pr_err("%s%u in RU state\n", link_unk_evt, event);
654 }
655 break;
656 case RESET_RESET:
657 switch (event) {
658 case TRAFFIC_MSG_EVT:
659 case ACTIVATE_MSG:
660 other = l_ptr->owner->active_links[0];
661 if (other && link_working_unknown(other))
662 break;
663 l_ptr->state = WORKING_WORKING;
664 l_ptr->fsm_msg_cnt = 0;
665 link_activate(l_ptr);
666 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
667 l_ptr->fsm_msg_cnt++;
668 if (l_ptr->owner->working_links == 1)
669 tipc_link_sync_xmit(l_ptr);
670 link_set_timer(l_ptr, cont_intv);
671 break;
672 case RESET_MSG:
673 break;
674 case TIMEOUT_EVT:
675 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
676 0, 0, 0, 0);
677 l_ptr->fsm_msg_cnt++;
678 link_set_timer(l_ptr, cont_intv);
679 break;
680 default:
681 pr_err("%s%u in RR state\n", link_unk_evt, event);
682 }
683 break;
684 default:
685 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
686 }
687 }
688
689 /**
690 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
691  * @net: the applicable net namespace
 * @link: link to use
692 * @list: chain of buffers containing message
693 *
694 * Consumes the buffer chain, except when returning -ELINKCONG,
695 * since the caller then may want to make more send attempts.
696 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
697 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
698 */
699 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
700 struct sk_buff_head *list)
701 {
702 struct tipc_msg *msg = buf_msg(skb_peek(list));
703 unsigned int maxwin = link->window;
704 unsigned int imp = msg_importance(msg);
705 uint mtu = link->mtu;
706 uint ack = mod(link->next_in_no - 1);
707 uint seqno = link->next_out_no;
708 uint bc_last_in = link->owner->bclink.last_in;
709 struct tipc_media_addr *addr = &link->media_addr;
710 struct sk_buff_head *transmq = &link->transmq;
711 struct sk_buff_head *backlogq = &link->backlogq;
712 struct sk_buff *skb, *tmp;
713
714 /* Match backlog limit against msg importance: */
715 if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
716 return link_schedule_user(link, list);
717
718 if (unlikely(msg_size(msg) > mtu)) {
719 __skb_queue_purge(list);
720 return -EMSGSIZE;
721 }
722 /* Prepare each packet for sending, and add to relevant queue: */
723 skb_queue_walk_safe(list, skb, tmp) {
724 __skb_unlink(skb, list);
725 msg = buf_msg(skb);
726 msg_set_seqno(msg, seqno);
727 msg_set_ack(msg, ack);
728 msg_set_bcast_ack(msg, bc_last_in);
729
730 if (likely(skb_queue_len(transmq) < maxwin)) {
731 __skb_queue_tail(transmq, skb);
732 tipc_bearer_send(net, link->bearer_id, skb, addr);
733 link->rcv_unacked = 0;
734 seqno++;
735 continue;
736 }
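		/* Transmit window is full: try to append the message to the
		 * last bundle on the backlog queue, or wrap it in a new
		 * bundle header before queueing it on the backlog
		 */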
737 if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
738 link->stats.sent_bundled++;
739 continue;
740 }
741 if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
742 link->stats.sent_bundled++;
743 link->stats.sent_bundles++;
744 imp = msg_importance(buf_msg(skb));
745 }
746 __skb_queue_tail(backlogq, skb);
747 link->backlog[imp].len++;
748 seqno++;
749 }
750 link->next_out_no = seqno;
751 return 0;
752 }
753
754 static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
755 {
756 skb_queue_head_init(list);
757 __skb_queue_tail(list, skb);
758 }
759
760 static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
761 {
762 struct sk_buff_head head;
763
764 skb2list(skb, &head);
765 return __tipc_link_xmit(link->owner->net, link, &head);
766 }
767
768 /* tipc_link_xmit_skb(): send single buffer to destination
769  * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
770 * messages, which will not be rejected
771 * The only exception is datagram messages rerouted after secondary
772 * lookup, which are rare and safe to dispose of anyway.
773 * TODO: Return real return value, and let callers use
774 * tipc_wait_for_sendpkt() where applicable
775 */
776 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
777 u32 selector)
778 {
779 struct sk_buff_head head;
780 int rc;
781
782 skb2list(skb, &head);
783 rc = tipc_link_xmit(net, &head, dnode, selector);
784 if (rc == -ELINKCONG)
785 kfree_skb(skb);
786 return 0;
787 }
788
789 /**
790 * tipc_link_xmit() is the general link level function for message sending
791 * @net: the applicable net namespace
792 * @list: chain of buffers containing message
794 * @dnode: address of destination node
795 * @selector: a number used for deterministic link selection
796 * Consumes the buffer chain, except when returning -ELINKCONG
797 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
798 */
799 int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
800 u32 selector)
801 {
802 struct tipc_link *link = NULL;
803 struct tipc_node *node;
804 int rc = -EHOSTUNREACH;
805
806 node = tipc_node_find(net, dnode);
807 if (node) {
808 tipc_node_lock(node);
809 link = node->active_links[selector & 1];
810 if (link)
811 rc = __tipc_link_xmit(net, link, list);
812 tipc_node_unlock(node);
813 tipc_node_put(node);
814 }
815 if (link)
816 return rc;
817
818 if (likely(in_own_node(net, dnode))) {
819 tipc_sk_rcv(net, list);
820 return 0;
821 }
822
823 __skb_queue_purge(list);
824 return rc;
825 }
826
827 /*
828 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
829 *
830 * Give a newly added peer node the sequence number where it should
831 * start receiving and acking broadcast packets.
832 *
833 * Called with node locked
834 */
835 static void tipc_link_sync_xmit(struct tipc_link *link)
836 {
837 struct sk_buff *skb;
838 struct tipc_msg *msg;
839
840 skb = tipc_buf_acquire(INT_H_SIZE);
841 if (!skb)
842 return;
843
844 msg = buf_msg(skb);
845 tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
846 INT_H_SIZE, link->addr);
847 msg_set_last_bcast(msg, link->owner->bclink.acked);
848 __tipc_link_xmit_skb(link, skb);
849 }
850
851 /*
852 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
853 * Receive the sequence number where we should start receiving and
854 * acking broadcast packets from a newly added peer node, and open
855 * up for reception of such packets.
856 *
857 * Called with node locked
858 */
859 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
860 {
861 struct tipc_msg *msg = buf_msg(buf);
862
863 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
864 n->bclink.recv_permitted = true;
865 kfree_skb(buf);
866 }
867
868 /*
869 * tipc_link_push_packets - push unsent packets to bearer
870 *
871 * Push out the unsent messages of a link where congestion
872  * has abated.
873 *
874 * Called with node locked
875 */
876 void tipc_link_push_packets(struct tipc_link *link)
877 {
878 struct sk_buff *skb;
879 struct tipc_msg *msg;
880 unsigned int ack = mod(link->next_in_no - 1);
881
882 while (skb_queue_len(&link->transmq) < link->window) {
883 skb = __skb_dequeue(&link->backlogq);
884 if (!skb)
885 break;
886 msg = buf_msg(skb);
887 link->backlog[msg_importance(msg)].len--;
888 msg_set_ack(msg, ack);
889 msg_set_bcast_ack(msg, link->owner->bclink.last_in);
890 link->rcv_unacked = 0;
891 __skb_queue_tail(&link->transmq, skb);
892 tipc_bearer_send(link->owner->net, link->bearer_id,
893 skb, &link->media_addr);
894 }
895 }
896
897 void tipc_link_reset_all(struct tipc_node *node)
898 {
899 char addr_string[16];
900 u32 i;
901
902 tipc_node_lock(node);
903
904 pr_warn("Resetting all links to %s\n",
905 tipc_addr_string_fill(addr_string, node->addr));
906
907 for (i = 0; i < MAX_BEARERS; i++) {
908 if (node->links[i]) {
909 link_print(node->links[i], "Resetting link\n");
910 tipc_link_reset(node->links[i]);
911 }
912 }
913
914 tipc_node_unlock(node);
915 }
916
917 static void link_retransmit_failure(struct tipc_link *l_ptr,
918 struct sk_buff *buf)
919 {
920 struct tipc_msg *msg = buf_msg(buf);
921 struct net *net = l_ptr->owner->net;
922
923 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
924
925 if (l_ptr->addr) {
926 /* Handle failure on standard link */
927 link_print(l_ptr, "Resetting link\n");
928 tipc_link_reset(l_ptr);
929
930 } else {
931 /* Handle failure on broadcast link */
932 struct tipc_node *n_ptr;
933 char addr_string[16];
934
935 pr_info("Msg seq number: %u, ", msg_seqno(msg));
936 pr_cont("Outstanding acks: %lu\n",
937 (unsigned long) TIPC_SKB_CB(buf)->handle);
938
939 n_ptr = tipc_bclink_retransmit_to(net);
940
941 tipc_addr_string_fill(addr_string, n_ptr->addr);
942 pr_info("Broadcast link info for %s\n", addr_string);
943 pr_info("Reception permitted: %d, Acked: %u\n",
944 n_ptr->bclink.recv_permitted,
945 n_ptr->bclink.acked);
946 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
947 n_ptr->bclink.last_in,
948 n_ptr->bclink.oos_state,
949 n_ptr->bclink.last_sent);
950
951 n_ptr->action_flags |= TIPC_BCAST_RESET;
952 l_ptr->stale_count = 0;
953 }
954 }
955
956 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
957 u32 retransmits)
958 {
959 struct tipc_msg *msg;
960
961 if (!skb)
962 return;
963
964 msg = buf_msg(skb);
965
966 /* Detect repeated retransmit failures */
967 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
968 if (++l_ptr->stale_count > 100) {
969 link_retransmit_failure(l_ptr, skb);
970 return;
971 }
972 } else {
973 l_ptr->last_retransmitted = msg_seqno(msg);
974 l_ptr->stale_count = 1;
975 }
976
977 skb_queue_walk_from(&l_ptr->transmq, skb) {
978 if (!retransmits)
979 break;
980 msg = buf_msg(skb);
981 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
982 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
983 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
984 &l_ptr->media_addr);
985 retransmits--;
986 l_ptr->stats.retransmitted++;
987 }
988 }
989
990 /* link_synch(): check whether all packets that arrived before the synch
991  * point have been consumed
992 * Returns true if the parallel links are synched, otherwise false
993 */
994 static bool link_synch(struct tipc_link *l)
995 {
996 unsigned int post_synch;
997 struct tipc_link *pl;
998
999 pl = tipc_parallel_link(l);
1000 if (pl == l)
1001 goto synched;
1002
1003 	/* Was last pre-synch packet added to input queue? */
1004 if (less_eq(pl->next_in_no, l->synch_point))
1005 return false;
1006
1007 	/* Is it still in the input queue? */
1008 post_synch = mod(pl->next_in_no - l->synch_point) - 1;
1009 if (skb_queue_len(&pl->inputq) > post_synch)
1010 return false;
1011 synched:
1012 l->flags &= ~LINK_SYNCHING;
1013 return true;
1014 }
1015
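/* link_retrieve_defq(): move deferred packets onto the processing list once
 * the first of them matches the next expected sequence number
 */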
1016 static void link_retrieve_defq(struct tipc_link *link,
1017 struct sk_buff_head *list)
1018 {
1019 u32 seq_no;
1020
1021 if (skb_queue_empty(&link->deferdq))
1022 return;
1023
1024 seq_no = buf_seqno(skb_peek(&link->deferdq));
1025 if (seq_no == mod(link->next_in_no))
1026 skb_queue_splice_tail_init(&link->deferdq, list);
1027 }
1028
1029 /**
1030 * tipc_rcv - process TIPC packets/messages arriving from off-node
1031 * @net: the applicable net namespace
1032 * @skb: TIPC packet
1033  * @b_ptr: pointer to bearer the message arrived on
1034 *
1035 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1036 * structure (i.e. cannot be NULL), but bearer can be inactive.
1037 */
1038 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1039 {
1040 struct tipc_net *tn = net_generic(net, tipc_net_id);
1041 struct sk_buff_head head;
1042 struct tipc_node *n_ptr;
1043 struct tipc_link *l_ptr;
1044 struct sk_buff *skb1, *tmp;
1045 struct tipc_msg *msg;
1046 u32 seq_no;
1047 u32 ackd;
1048 u32 released;
1049
1050 skb2list(skb, &head);
1051
1052 while ((skb = __skb_dequeue(&head))) {
1053 /* Ensure message is well-formed */
1054 if (unlikely(!tipc_msg_validate(skb)))
1055 goto discard;
1056
1057 /* Handle arrival of a non-unicast link message */
1058 msg = buf_msg(skb);
1059 if (unlikely(msg_non_seq(msg))) {
1060 if (msg_user(msg) == LINK_CONFIG)
1061 tipc_disc_rcv(net, skb, b_ptr);
1062 else
1063 tipc_bclink_rcv(net, skb);
1064 continue;
1065 }
1066
1067 /* Discard unicast link messages destined for another node */
1068 if (unlikely(!msg_short(msg) &&
1069 (msg_destnode(msg) != tn->own_addr)))
1070 goto discard;
1071
1072 /* Locate neighboring node that sent message */
1073 n_ptr = tipc_node_find(net, msg_prevnode(msg));
1074 if (unlikely(!n_ptr))
1075 goto discard;
1076
1077 tipc_node_lock(n_ptr);
1078 /* Locate unicast link endpoint that should handle message */
1079 l_ptr = n_ptr->links[b_ptr->identity];
1080 if (unlikely(!l_ptr))
1081 goto unlock;
1082
1083 /* Verify that communication with node is currently allowed */
1084 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1085 msg_user(msg) == LINK_PROTOCOL &&
1086 (msg_type(msg) == RESET_MSG ||
1087 msg_type(msg) == ACTIVATE_MSG) &&
1088 !msg_redundant_link(msg))
1089 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1090
1091 if (tipc_node_blocked(n_ptr))
1092 goto unlock;
1093
1094 /* Validate message sequence number info */
1095 seq_no = msg_seqno(msg);
1096 ackd = msg_ack(msg);
1097
1098 /* Release acked messages */
1099 if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
1100 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1101
1102 released = 0;
1103 skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
1104 if (more(buf_seqno(skb1), ackd))
1105 break;
1106 __skb_unlink(skb1, &l_ptr->transmq);
1107 kfree_skb(skb1);
1108 released = 1;
1109 }
1110
1111 /* Try sending any messages link endpoint has pending */
1112 if (unlikely(skb_queue_len(&l_ptr->backlogq)))
1113 tipc_link_push_packets(l_ptr);
1114
1115 if (released && !skb_queue_empty(&l_ptr->wakeupq))
1116 link_prepare_wakeup(l_ptr);
1117
1118 /* Process the incoming packet */
1119 if (unlikely(!link_working_working(l_ptr))) {
1120 if (msg_user(msg) == LINK_PROTOCOL) {
1121 tipc_link_proto_rcv(l_ptr, skb);
1122 link_retrieve_defq(l_ptr, &head);
1123 skb = NULL;
1124 goto unlock;
1125 }
1126
1127 /* Traffic message. Conditionally activate link */
1128 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1129
1130 if (link_working_working(l_ptr)) {
1131 /* Re-insert buffer in front of queue */
1132 __skb_queue_head(&head, skb);
1133 skb = NULL;
1134 goto unlock;
1135 }
1136 goto unlock;
1137 }
1138
1139 /* Link is now in state WORKING_WORKING */
1140 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1141 link_handle_out_of_seq_msg(l_ptr, skb);
1142 link_retrieve_defq(l_ptr, &head);
1143 skb = NULL;
1144 goto unlock;
1145 }
1146 /* Synchronize with parallel link if applicable */
1147 if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
1148 if (!link_synch(l_ptr))
1149 goto unlock;
1150 }
1151 l_ptr->next_in_no++;
1152 if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
1153 link_retrieve_defq(l_ptr, &head);
1154 if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
1155 l_ptr->stats.sent_acks++;
1156 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
1157 }
1158 tipc_link_input(l_ptr, skb);
1159 skb = NULL;
1160 unlock:
1161 tipc_node_unlock(n_ptr);
1162 tipc_node_put(n_ptr);
1163 discard:
1164 if (unlikely(skb))
1165 kfree_skb(skb);
1166 }
1167 }
1168
1169 /* tipc_data_input - deliver data and name distr msgs to upper layer
1170 *
1171 * Consumes buffer if message is of right type
1172 * Node lock must be held
1173 */
1174 static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1175 {
1176 struct tipc_node *node = link->owner;
1177 struct tipc_msg *msg = buf_msg(skb);
1178 u32 dport = msg_destport(msg);
1179
1180 switch (msg_user(msg)) {
1181 case TIPC_LOW_IMPORTANCE:
1182 case TIPC_MEDIUM_IMPORTANCE:
1183 case TIPC_HIGH_IMPORTANCE:
1184 case TIPC_CRITICAL_IMPORTANCE:
1185 case CONN_MANAGER:
1186 if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
1187 node->inputq = &link->inputq;
1188 node->action_flags |= TIPC_MSG_EVT;
1189 }
1190 return true;
1191 case NAME_DISTRIBUTOR:
1192 node->bclink.recv_permitted = true;
1193 node->namedq = &link->namedq;
1194 skb_queue_tail(&link->namedq, skb);
1195 if (skb_queue_len(&link->namedq) == 1)
1196 node->action_flags |= TIPC_NAMED_MSG_EVT;
1197 return true;
1198 case MSG_BUNDLER:
1199 case TUNNEL_PROTOCOL:
1200 case MSG_FRAGMENTER:
1201 case BCAST_PROTOCOL:
1202 return false;
1203 default:
1204 pr_warn("Dropping received illegal msg type\n");
1205 kfree_skb(skb);
1206 return false;
1207 	}
1208 }
1209
1210 /* tipc_link_input - process packet that has passed link protocol check
1211 *
1212 * Consumes buffer
1213 * Node lock must be held
1214 */
1215 static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1216 {
1217 struct tipc_node *node = link->owner;
1218 struct tipc_msg *msg = buf_msg(skb);
1219 struct sk_buff *iskb;
1220 int pos = 0;
1221
1222 if (likely(tipc_data_input(link, skb)))
1223 return;
1224
1225 switch (msg_user(msg)) {
1226 case TUNNEL_PROTOCOL:
1227 if (msg_dup(msg)) {
1228 link->flags |= LINK_SYNCHING;
1229 link->synch_point = msg_seqno(msg_get_wrapped(msg));
1230 kfree_skb(skb);
1231 break;
1232 }
1233 if (!tipc_link_failover_rcv(link, &skb))
1234 break;
1235 if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1236 tipc_data_input(link, skb);
1237 break;
1238 }
1239 case MSG_BUNDLER:
1240 link->stats.recv_bundles++;
1241 link->stats.recv_bundled += msg_msgcnt(msg);
1242
1243 while (tipc_msg_extract(skb, &iskb, &pos))
1244 tipc_data_input(link, iskb);
1245 break;
1246 case MSG_FRAGMENTER:
1247 link->stats.recv_fragments++;
1248 if (tipc_buf_append(&link->reasm_buf, &skb)) {
1249 link->stats.recv_fragmented++;
1250 tipc_data_input(link, skb);
1251 } else if (!link->reasm_buf) {
1252 tipc_link_reset(link);
1253 }
1254 break;
1255 case BCAST_PROTOCOL:
1256 tipc_link_sync_rcv(node, skb);
1257 break;
1258 default:
1259 break;
1260 	}
1261 }
1262
1263 /**
1264 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1265 *
1266 * Returns increase in queue length (i.e. 0 or 1)
1267 */
1268 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
1269 {
1270 struct sk_buff *skb1;
1271 u32 seq_no = buf_seqno(skb);
1272
1273 /* Empty queue ? */
1274 if (skb_queue_empty(list)) {
1275 __skb_queue_tail(list, skb);
1276 return 1;
1277 }
1278
1279 /* Last ? */
1280 if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1281 __skb_queue_tail(list, skb);
1282 return 1;
1283 }
1284
1285 /* Locate insertion point in queue, then insert; discard if duplicate */
1286 skb_queue_walk(list, skb1) {
1287 u32 curr_seqno = buf_seqno(skb1);
1288
1289 if (seq_no == curr_seqno) {
1290 kfree_skb(skb);
1291 return 0;
1292 }
1293
1294 if (less(seq_no, curr_seqno))
1295 break;
1296 }
1297
1298 __skb_queue_before(list, skb1, skb);
1299 return 1;
1300 }
1301
1302 /*
1303 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1304 */
1305 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1306 struct sk_buff *buf)
1307 {
1308 u32 seq_no = buf_seqno(buf);
1309
1310 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1311 tipc_link_proto_rcv(l_ptr, buf);
1312 return;
1313 }
1314
1315 /* Record OOS packet arrival (force mismatch on next timeout) */
1316 l_ptr->checkpoint--;
1317
1318 /*
1319 * Discard packet if a duplicate; otherwise add it to deferred queue
1320 * and notify peer of gap as per protocol specification
1321 */
1322 if (less(seq_no, mod(l_ptr->next_in_no))) {
1323 l_ptr->stats.duplicates++;
1324 kfree_skb(buf);
1325 return;
1326 }
1327
1328 if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
1329 l_ptr->stats.deferred_recv++;
1330 if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
1331 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
1332 } else {
1333 l_ptr->stats.duplicates++;
1334 }
1335 }
1336
1337 /*
1338 * Send protocol message to the other endpoint.
1339 */
1340 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1341 u32 gap, u32 tolerance, u32 priority)
1342 {
1343 struct sk_buff *buf = NULL;
1344 struct tipc_msg *msg = l_ptr->pmsg;
1345 u32 msg_size = sizeof(l_ptr->proto_msg);
1346 int r_flag;
1347
1348 /* Don't send protocol message during link failover */
1349 if (l_ptr->flags & LINK_FAILINGOVER)
1350 return;
1351
1352 /* Abort non-RESET send if communication with node is prohibited */
1353 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1354 return;
1355
1356 /* Create protocol message with "out-of-sequence" sequence number */
1357 msg_set_type(msg, msg_typ);
1358 msg_set_net_plane(msg, l_ptr->net_plane);
1359 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1360 msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
1361
1362 if (msg_typ == STATE_MSG) {
1363 u32 next_sent = mod(l_ptr->next_out_no);
1364
1365 if (!tipc_link_is_up(l_ptr))
1366 return;
1367 if (skb_queue_len(&l_ptr->backlogq))
1368 next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
1369 msg_set_next_sent(msg, next_sent);
1370 if (!skb_queue_empty(&l_ptr->deferdq)) {
1371 u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
1372 gap = mod(rec - mod(l_ptr->next_in_no));
1373 }
1374 msg_set_seq_gap(msg, gap);
1375 if (gap)
1376 l_ptr->stats.sent_nacks++;
1377 msg_set_link_tolerance(msg, tolerance);
1378 msg_set_linkprio(msg, priority);
1379 msg_set_max_pkt(msg, l_ptr->mtu);
1380 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1381 msg_set_probe(msg, probe_msg != 0);
1382 if (probe_msg)
1383 l_ptr->stats.sent_probes++;
1384 l_ptr->stats.sent_states++;
1385 } else { /* RESET_MSG or ACTIVATE_MSG */
1386 msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
1387 msg_set_seq_gap(msg, 0);
1388 msg_set_next_sent(msg, 1);
1389 msg_set_probe(msg, 0);
1390 msg_set_link_tolerance(msg, l_ptr->tolerance);
1391 msg_set_linkprio(msg, l_ptr->priority);
1392 msg_set_max_pkt(msg, l_ptr->advertised_mtu);
1393 }
1394
1395 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1396 msg_set_redundant_link(msg, r_flag);
1397 msg_set_linkprio(msg, l_ptr->priority);
1398 msg_set_size(msg, msg_size);
1399
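	/* Protocol messages carry a sequence number offset by half the 16-bit
	 * sequence space, keeping them well outside the current send window
	 * so the receiver never mistakes them for data packets
	 */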
1400 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1401
1402 buf = tipc_buf_acquire(msg_size);
1403 if (!buf)
1404 return;
1405
1406 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1407 buf->priority = TC_PRIO_CONTROL;
1408 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
1409 &l_ptr->media_addr);
1410 l_ptr->rcv_unacked = 0;
1411 kfree_skb(buf);
1412 }
1413
1414 /*
1415 * Receive protocol message :
1416 * Note that network plane id propagates through the network, and may
1417 * change at any time. The node with lowest address rules
1418 */
1419 static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1420 struct sk_buff *buf)
1421 {
1422 u32 rec_gap = 0;
1423 u32 msg_tol;
1424 struct tipc_msg *msg = buf_msg(buf);
1425
1426 if (l_ptr->flags & LINK_FAILINGOVER)
1427 goto exit;
1428
1429 if (l_ptr->net_plane != msg_net_plane(msg))
1430 if (link_own_addr(l_ptr) > msg_prevnode(msg))
1431 l_ptr->net_plane = msg_net_plane(msg);
1432
1433 switch (msg_type(msg)) {
1434
1435 case RESET_MSG:
1436 if (!link_working_unknown(l_ptr) &&
1437 (l_ptr->peer_session != INVALID_SESSION)) {
1438 if (less_eq(msg_session(msg), l_ptr->peer_session))
1439 break; /* duplicate or old reset: ignore */
1440 }
1441
1442 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1443 link_working_unknown(l_ptr))) {
1444 /*
1445 * peer has lost contact -- don't allow peer's links
1446 * to reactivate before we recognize loss & clean up
1447 */
1448 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1449 }
1450
1451 link_state_event(l_ptr, RESET_MSG);
1452
1453 /* fall thru' */
1454 case ACTIVATE_MSG:
1455 		/* Update link settings according to the other endpoint's values */
1456 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1457
1458 msg_tol = msg_link_tolerance(msg);
1459 if (msg_tol > l_ptr->tolerance)
1460 link_set_supervision_props(l_ptr, msg_tol);
1461
1462 if (msg_linkprio(msg) > l_ptr->priority)
1463 l_ptr->priority = msg_linkprio(msg);
1464
1465 if (l_ptr->mtu > msg_max_pkt(msg))
1466 l_ptr->mtu = msg_max_pkt(msg);
1467
1468 /* Synchronize broadcast link info, if not done previously */
1469 if (!tipc_node_is_up(l_ptr->owner)) {
1470 l_ptr->owner->bclink.last_sent =
1471 l_ptr->owner->bclink.last_in =
1472 msg_last_bcast(msg);
1473 l_ptr->owner->bclink.oos_state = 0;
1474 }
1475
1476 l_ptr->peer_session = msg_session(msg);
1477 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1478
1479 if (msg_type(msg) == ACTIVATE_MSG)
1480 link_state_event(l_ptr, ACTIVATE_MSG);
1481 break;
1482 case STATE_MSG:
1483
1484 msg_tol = msg_link_tolerance(msg);
1485 if (msg_tol)
1486 link_set_supervision_props(l_ptr, msg_tol);
1487
1488 if (msg_linkprio(msg) &&
1489 (msg_linkprio(msg) != l_ptr->priority)) {
1490 pr_debug("%s<%s>, priority change %u->%u\n",
1491 link_rst_msg, l_ptr->name,
1492 l_ptr->priority, msg_linkprio(msg));
1493 l_ptr->priority = msg_linkprio(msg);
1494 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1495 break;
1496 }
1497
1498 /* Record reception; force mismatch at next timeout: */
1499 l_ptr->checkpoint--;
1500
1501 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1502 l_ptr->stats.recv_states++;
1503 if (link_reset_unknown(l_ptr))
1504 break;
1505
1506 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1507 rec_gap = mod(msg_next_sent(msg) -
1508 mod(l_ptr->next_in_no));
1509 }
1510
1511 if (msg_probe(msg))
1512 l_ptr->stats.recv_probes++;
1513
1514 /* Protocol message before retransmits, reduce loss risk */
1515 if (l_ptr->owner->bclink.recv_permitted)
1516 tipc_bclink_update_link_state(l_ptr->owner,
1517 msg_last_bcast(msg));
1518
1519 if (rec_gap || (msg_probe(msg))) {
1520 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
1521 rec_gap, 0, 0);
1522 }
1523 if (msg_seq_gap(msg)) {
1524 l_ptr->stats.recv_nacks++;
1525 tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
1526 msg_seq_gap(msg));
1527 }
1528 break;
1529 }
1530 exit:
1531 kfree_skb(buf);
1532 }
1533
1534
1535 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1536 * a different bearer. Owner node is locked.
1537 */
1538 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1539 struct tipc_msg *tunnel_hdr,
1540 struct tipc_msg *msg,
1541 u32 selector)
1542 {
1543 struct tipc_link *tunnel;
1544 struct sk_buff *skb;
1545 u32 length = msg_size(msg);
1546
1547 tunnel = l_ptr->owner->active_links[selector & 1];
1548 if (!tipc_link_is_up(tunnel)) {
1549 pr_warn("%stunnel link no longer available\n", link_co_err);
1550 return;
1551 }
1552 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1553 skb = tipc_buf_acquire(length + INT_H_SIZE);
1554 if (!skb) {
1555 pr_warn("%sunable to send tunnel msg\n", link_co_err);
1556 return;
1557 }
1558 skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
1559 skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
1560 __tipc_link_xmit_skb(tunnel, skb);
1561 }
1562
1563
1564 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1565 * link is still active. We can do failover. Tunnel the failing link's
1566 * whole send queue via the remaining link. This way, we don't lose
1567 * any packets, and sequence order is preserved for subsequent traffic
1568 * sent over the remaining link. Owner node is locked.
1569 */
1570 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1571 {
1572 int msgcount;
1573 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1574 struct tipc_msg tunnel_hdr;
1575 struct sk_buff *skb;
1576 int split_bundles;
1577
1578 if (!tunnel)
1579 return;
1580
1581 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
1582 FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
1583 skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
1584 tipc_link_purge_backlog(l_ptr);
1585 msgcount = skb_queue_len(&l_ptr->transmq);
1586 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1587 msg_set_msgcnt(&tunnel_hdr, msgcount);
1588
1589 if (skb_queue_empty(&l_ptr->transmq)) {
1590 skb = tipc_buf_acquire(INT_H_SIZE);
1591 if (skb) {
1592 skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
1593 msg_set_size(&tunnel_hdr, INT_H_SIZE);
1594 __tipc_link_xmit_skb(tunnel, skb);
1595 } else {
1596 pr_warn("%sunable to send changeover msg\n",
1597 link_co_err);
1598 }
1599 return;
1600 }
1601
1602 split_bundles = (l_ptr->owner->active_links[0] !=
1603 l_ptr->owner->active_links[1]);
1604
1605 skb_queue_walk(&l_ptr->transmq, skb) {
1606 struct tipc_msg *msg = buf_msg(skb);
1607
1608 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
1609 struct tipc_msg *m = msg_get_wrapped(msg);
1610 unchar *pos = (unchar *)m;
1611
1612 msgcount = msg_msgcnt(msg);
1613 while (msgcount--) {
1614 msg_set_seqno(m, msg_seqno(msg));
1615 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
1616 msg_link_selector(m));
1617 pos += align(msg_size(m));
1618 m = (struct tipc_msg *)pos;
1619 }
1620 } else {
1621 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
1622 msg_link_selector(msg));
1623 }
1624 }
1625 }
1626
1627 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1628 * duplicate of the first link's send queue via the new link. This way, we
1629 * are guaranteed that currently queued packets from a socket are delivered
1630 * before future traffic from the same socket, even if this is using the
1631 * new link. The last arriving copy of each duplicate packet is dropped at
1632 * the receiving end by the regular protocol check, so packet cardinality
1633 * and sequence order is preserved per sender/receiver socket pair.
1634 * Owner node is locked.
1635 */
1636 void tipc_link_dup_queue_xmit(struct tipc_link *link,
1637 struct tipc_link *tnl)
1638 {
1639 struct sk_buff *skb;
1640 struct tipc_msg tnl_hdr;
1641 struct sk_buff_head *queue = &link->transmq;
1642 int mcnt;
1643
1644 tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
1645 SYNCH_MSG, INT_H_SIZE, link->addr);
1646 mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
1647 msg_set_msgcnt(&tnl_hdr, mcnt);
1648 msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
1649
1650 tunnel_queue:
1651 skb_queue_walk(queue, skb) {
1652 struct sk_buff *outskb;
1653 struct tipc_msg *msg = buf_msg(skb);
1654 u32 len = msg_size(msg);
1655
1656 msg_set_ack(msg, mod(link->next_in_no - 1));
1657 msg_set_bcast_ack(msg, link->owner->bclink.last_in);
1658 msg_set_size(&tnl_hdr, len + INT_H_SIZE);
1659 outskb = tipc_buf_acquire(len + INT_H_SIZE);
1660 if (outskb == NULL) {
1661 pr_warn("%sunable to send duplicate msg\n",
1662 link_co_err);
1663 return;
1664 }
1665 skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
1666 skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
1667 skb->data, len);
1668 __tipc_link_xmit_skb(tnl, outskb);
1669 if (!tipc_link_is_up(link))
1670 return;
1671 }
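	/* First pass tunnelled the transmit queue; repeat once for the
	 * backlog queue, then stop
	 */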
1672 if (queue == &link->backlogq)
1673 return;
1674 queue = &link->backlogq;
1675 goto tunnel_queue;
1676 }
1677
1678 /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
1679 * Owner node is locked.
1680 */
1681 static bool tipc_link_failover_rcv(struct tipc_link *link,
1682 struct sk_buff **skb)
1683 {
1684 struct tipc_msg *msg = buf_msg(*skb);
1685 struct sk_buff *iskb = NULL;
1686 struct tipc_link *pl = NULL;
1687 int bearer_id = msg_bearer_id(msg);
1688 int pos = 0;
1689
1690 if (msg_type(msg) != FAILOVER_MSG) {
1691 pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1692 goto exit;
1693 }
1694 if (bearer_id >= MAX_BEARERS)
1695 goto exit;
1696
1697 if (bearer_id == link->bearer_id)
1698 goto exit;
1699
1700 pl = link->owner->links[bearer_id];
1701 if (pl && tipc_link_is_up(pl))
1702 tipc_link_reset(pl);
1703
1704 if (link->failover_pkts == FIRST_FAILOVER)
1705 link->failover_pkts = msg_msgcnt(msg);
1706
1707 /* Should we expect an inner packet? */
1708 if (!link->failover_pkts)
1709 goto exit;
1710
1711 if (!tipc_msg_extract(*skb, &iskb, &pos)) {
1712 pr_warn("%sno inner failover pkt\n", link_co_err);
1713 *skb = NULL;
1714 goto exit;
1715 }
1716 link->failover_pkts--;
1717 *skb = NULL;
1718
1719 /* Was this packet already delivered? */
1720 if (less(buf_seqno(iskb), link->failover_checkpt)) {
1721 kfree_skb(iskb);
1722 iskb = NULL;
1723 goto exit;
1724 }
1725 if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
1726 link->stats.recv_fragments++;
1727 tipc_buf_append(&link->failover_skb, &iskb);
1728 }
1729 exit:
1730 if (!link->failover_pkts && pl)
1731 pl->flags &= ~LINK_FAILINGOVER;
1732 kfree_skb(*skb);
1733 *skb = iskb;
1734 return *skb;
1735 }
1736
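/* link_set_supervision_props(): derive the continuity (probe) interval,
 * capped at 500 ms, and the number of unanswered probes tolerated before
 * the link is considered failed
 */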
1737 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
1738 {
1739 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
1740
1741 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1742 return;
1743
1744 l_ptr->tolerance = tol;
1745 l_ptr->cont_intv = msecs_to_jiffies(intv);
1746 l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
1747 }
1748
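/* tipc_link_set_queue_limits(): backlog limits scale with the link window,
 * from half the window for LOW importance up to twice the window for
 * CRITICAL; SYSTEM importance gets a limit sized so a full bulk name-table
 * distribution (publications per MTU) can be backlogged
 */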
1749 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1750 {
1751 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1752
1753 l->window = win;
1754 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
1755 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
1756 l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
1757 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1758 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
1759 }
1760
1761 /* tipc_link_find_owner - locate owner node of link by link's name
1762 * @net: the applicable net namespace
1763 * @name: pointer to link name string
1764 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1765 *
1766  * Returns pointer to node owning the link, or NULL if no matching link is found.
1767 */
1768 static struct tipc_node *tipc_link_find_owner(struct net *net,
1769 const char *link_name,
1770 unsigned int *bearer_id)
1771 {
1772 struct tipc_net *tn = net_generic(net, tipc_net_id);
1773 struct tipc_link *l_ptr;
1774 struct tipc_node *n_ptr;
1775 struct tipc_node *found_node = NULL;
1776 int i;
1777
1778 *bearer_id = 0;
1779 rcu_read_lock();
1780 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1781 tipc_node_lock(n_ptr);
1782 for (i = 0; i < MAX_BEARERS; i++) {
1783 l_ptr = n_ptr->links[i];
1784 if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1785 *bearer_id = i;
1786 found_node = n_ptr;
1787 break;
1788 }
1789 }
1790 tipc_node_unlock(n_ptr);
1791 if (found_node)
1792 break;
1793 }
1794 rcu_read_unlock();
1795
1796 return found_node;
1797 }
1798
1799 /**
1800 * link_reset_statistics - reset link statistics
1801 * @l_ptr: pointer to link
1802 */
1803 static void link_reset_statistics(struct tipc_link *l_ptr)
1804 {
1805 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
1806 l_ptr->stats.sent_info = l_ptr->next_out_no;
1807 l_ptr->stats.recv_info = l_ptr->next_in_no;
1808 }
1809
1810 static void link_print(struct tipc_link *l_ptr, const char *str)
1811 {
1812 struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
1813 struct tipc_bearer *b_ptr;
1814
1815 rcu_read_lock();
1816 b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
1817 if (b_ptr)
1818 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
1819 rcu_read_unlock();
1820
1821 if (link_working_unknown(l_ptr))
1822 pr_cont(":WU\n");
1823 else if (link_reset_reset(l_ptr))
1824 pr_cont(":RR\n");
1825 else if (link_reset_unknown(l_ptr))
1826 pr_cont(":RU\n");
1827 else if (link_working_working(l_ptr))
1828 pr_cont(":WW\n");
1829 else
1830 pr_cont("\n");
1831 }
1832
1833 /* Parse and validate nested (link) properties valid for media, bearer and link
1834 */
1835 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1836 {
1837 int err;
1838
1839 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1840 tipc_nl_prop_policy);
1841 if (err)
1842 return err;
1843
1844 if (props[TIPC_NLA_PROP_PRIO]) {
1845 u32 prio;
1846
1847 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1848 if (prio > TIPC_MAX_LINK_PRI)
1849 return -EINVAL;
1850 }
1851
1852 if (props[TIPC_NLA_PROP_TOL]) {
1853 u32 tol;
1854
1855 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1856 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1857 return -EINVAL;
1858 }
1859
1860 if (props[TIPC_NLA_PROP_WIN]) {
1861 u32 win;
1862
1863 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1864 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1865 return -EINVAL;
1866 }
1867
1868 return 0;
1869 }
1870
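/* tipc_nl_link_set - netlink handler that updates tolerance, priority
 * and/or window of the link named in TIPC_NLA_LINK_NAME
 */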
1871 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
1872 {
1873 int err;
1874 int res = 0;
1875 int bearer_id;
1876 char *name;
1877 struct tipc_link *link;
1878 struct tipc_node *node;
1879 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1880 struct net *net = sock_net(skb->sk);
1881
1882 if (!info->attrs[TIPC_NLA_LINK])
1883 return -EINVAL;
1884
1885 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1886 info->attrs[TIPC_NLA_LINK],
1887 tipc_nl_link_policy);
1888 if (err)
1889 return err;
1890
1891 if (!attrs[TIPC_NLA_LINK_NAME])
1892 return -EINVAL;
1893
1894 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1895
1896 node = tipc_link_find_owner(net, name, &bearer_id);
1897 if (!node)
1898 return -EINVAL;
1899
1900 tipc_node_lock(node);
1901
1902 link = node->links[bearer_id];
1903 if (!link) {
1904 res = -EINVAL;
1905 goto out;
1906 }
1907
1908 if (attrs[TIPC_NLA_LINK_PROP]) {
1909 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1910
1911 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1912 props);
1913 if (err) {
1914 res = err;
1915 goto out;
1916 }
1917
1918 if (props[TIPC_NLA_PROP_TOL]) {
1919 u32 tol;
1920
1921 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1922 link_set_supervision_props(link, tol);
1923 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
1924 }
1925 if (props[TIPC_NLA_PROP_PRIO]) {
1926 u32 prio;
1927
1928 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1929 link->priority = prio;
1930 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
1931 }
1932 if (props[TIPC_NLA_PROP_WIN]) {
1933 u32 win;
1934
1935 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1936 tipc_link_set_queue_limits(link, win);
1937 }
1938 }
1939
1940 out:
1941 tipc_node_unlock(node);
1942
1943 return res;
1944 }
1945
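/* __tipc_nl_add_stats - nest the link statistics counters as
 * TIPC_NLA_LINK_STATS attributes in a netlink message
 */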
1946 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1947 {
1948 int i;
1949 struct nlattr *stats;
1950
1951 struct nla_map {
1952 u32 key;
1953 u32 val;
1954 };
1955
1956 struct nla_map map[] = {
1957 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
1958 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1959 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1960 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1961 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1962 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
1963 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1964 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1965 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1966 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1967 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1968 s->msg_length_counts : 1},
1969 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1970 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1971 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1972 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1973 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1974 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1975 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1976 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1977 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1978 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1979 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1980 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1981 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1982 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1983 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1984 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1985 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1986 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1987 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1988 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1989 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1990 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1991 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1992 };
1993
1994 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1995 if (!stats)
1996 return -EMSGSIZE;
1997
1998 for (i = 0; i < ARRAY_SIZE(map); i++)
1999 if (nla_put_u32(skb, map[i].key, map[i].val))
2000 goto msg_full;
2001
2002 nla_nest_end(skb, stats);
2003
2004 return 0;
2005 msg_full:
2006 nla_nest_cancel(skb, stats);
2007
2008 return -EMSGSIZE;
2009 }
2010
2011 /* Caller should hold appropriate locks to protect the link */
2012 static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2013 struct tipc_link *link, int nlflags)
2014 {
2015 int err;
2016 void *hdr;
2017 struct nlattr *attrs;
2018 struct nlattr *prop;
2019 struct tipc_net *tn = net_generic(net, tipc_net_id);
2020
2021 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2022 nlflags, TIPC_NL_LINK_GET);
2023 if (!hdr)
2024 return -EMSGSIZE;
2025
2026 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2027 if (!attrs)
2028 goto msg_full;
2029
2030 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2031 goto attr_msg_full;
2032 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2033 tipc_cluster_mask(tn->own_addr)))
2034 goto attr_msg_full;
2035 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2036 goto attr_msg_full;
2037 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
2038 goto attr_msg_full;
2039 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
2040 goto attr_msg_full;
2041
2042 if (tipc_link_is_up(link))
2043 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2044 goto attr_msg_full;
2045 if (tipc_link_is_active(link))
2046 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2047 goto attr_msg_full;
2048
2049 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2050 if (!prop)
2051 goto attr_msg_full;
2052 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2053 goto prop_msg_full;
2054 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2055 goto prop_msg_full;
2056 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2057 link->window))
2058 goto prop_msg_full;
2061 nla_nest_end(msg->skb, prop);
2062
2063 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2064 if (err)
2065 goto attr_msg_full;
2066
2067 nla_nest_end(msg->skb, attrs);
2068 genlmsg_end(msg->skb, hdr);
2069
2070 return 0;
2071
2072 prop_msg_full:
2073 nla_nest_cancel(msg->skb, prop);
2074 attr_msg_full:
2075 nla_nest_cancel(msg->skb, attrs);
2076 msg_full:
2077 genlmsg_cancel(msg->skb, hdr);
2078
2079 return -EMSGSIZE;
2080 }
2081
2082 /* Caller should hold node lock */
2083 static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2084 struct tipc_node *node, u32 *prev_link)
2085 {
2086 u32 i;
2087 int err;
2088
2089 for (i = *prev_link; i < MAX_BEARERS; i++) {
2090 *prev_link = i;
2091
2092 if (!node->links[i])
2093 continue;
2094
2095 err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
2096 if (err)
2097 return err;
2098 }
2099 *prev_link = 0;
2100
2101 return 0;
2102 }
2103
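/* tipc_nl_link_dump - netlink dump handler; walks all nodes and their
 * links, resuming from the node/link indices saved in cb->args[]
 */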
2104 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2105 {
2106 struct net *net = sock_net(skb->sk);
2107 struct tipc_net *tn = net_generic(net, tipc_net_id);
2108 struct tipc_node *node;
2109 struct tipc_nl_msg msg;
2110 u32 prev_node = cb->args[0];
2111 u32 prev_link = cb->args[1];
2112 int done = cb->args[2];
2113 int err;
2114
2115 if (done)
2116 return 0;
2117
2118 msg.skb = skb;
2119 msg.portid = NETLINK_CB(cb->skb).portid;
2120 msg.seq = cb->nlh->nlmsg_seq;
2121
2122 rcu_read_lock();
2123 if (prev_node) {
2124 node = tipc_node_find(net, prev_node);
2125 if (!node) {
2126 /* We never set seq or call nl_dump_check_consistent()
2127 * which means that setting prev_seq here will cause the
2128 * consistency check to fail in the netlink callback
2129 * handler, resulting in the last NLMSG_DONE message
2130 * having the NLM_F_DUMP_INTR flag set.
2131 */
2132 cb->prev_seq = 1;
2133 goto out;
2134 }
2135 tipc_node_put(node);
2136
2137 list_for_each_entry_continue_rcu(node, &tn->node_list,
2138 list) {
2139 tipc_node_lock(node);
2140 err = __tipc_nl_add_node_links(net, &msg, node,
2141 &prev_link);
2142 tipc_node_unlock(node);
2143 if (err)
2144 goto out;
2145
2146 prev_node = node->addr;
2147 }
2148 } else {
2149 err = tipc_nl_add_bc_link(net, &msg);
2150 if (err)
2151 goto out;
2152
2153 list_for_each_entry_rcu(node, &tn->node_list, list) {
2154 tipc_node_lock(node);
2155 err = __tipc_nl_add_node_links(net, &msg, node,
2156 &prev_link);
2157 tipc_node_unlock(node);
2158 if (err)
2159 goto out;
2160
2161 prev_node = node->addr;
2162 }
2163 }
2164 done = 1;
2165 out:
2166 rcu_read_unlock();
2167
2168 cb->args[0] = prev_node;
2169 cb->args[1] = prev_link;
2170 cb->args[2] = done;
2171
2172 return skb->len;
2173 }
2174
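/* tipc_nl_link_get - netlink handler that returns the attributes of a
 * single link, looked up by name
 */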
2175 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2176 {
2177 struct net *net = genl_info_net(info);
2178 struct sk_buff *ans_skb;
2179 struct tipc_nl_msg msg;
2180 struct tipc_link *link;
2181 struct tipc_node *node;
2182 char *name;
2183 int bearer_id;
2184 int err;
2185
2186 if (!info->attrs[TIPC_NLA_LINK_NAME])
2187 return -EINVAL;
2188
2189 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2190 node = tipc_link_find_owner(net, name, &bearer_id);
2191 if (!node)
2192 return -EINVAL;
2193
2194 ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2195 if (!ans_skb)
2196 return -ENOMEM;
2197
2198 msg.skb = ans_skb;
2199 msg.portid = info->snd_portid;
2200 msg.seq = info->snd_seq;
2201
2202 tipc_node_lock(node);
2203 link = node->links[bearer_id];
2204 if (!link) {
2205 err = -EINVAL;
2206 goto err_out;
2207 }
2208
2209 err = __tipc_nl_add_link(net, &msg, link, 0);
2210 if (err)
2211 goto err_out;
2212
2213 tipc_node_unlock(node);
2214
2215 return genlmsg_reply(ans_skb, info);
2216
2217 err_out:
2218 tipc_node_unlock(node);
2219 nlmsg_free(ans_skb);
2220
2221 return err;
2222 }
2223
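/* tipc_nl_link_reset_stats - netlink handler that clears the statistics
 * of the named link, or of the broadcast link if the broadcast link
 * name is given
 */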
2224 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2225 {
2226 int err;
2227 char *link_name;
2228 unsigned int bearer_id;
2229 struct tipc_link *link;
2230 struct tipc_node *node;
2231 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2232 struct net *net = sock_net(skb->sk);
2233
2234 if (!info->attrs[TIPC_NLA_LINK])
2235 return -EINVAL;
2236
2237 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2238 info->attrs[TIPC_NLA_LINK],
2239 tipc_nl_link_policy);
2240 if (err)
2241 return err;
2242
2243 if (!attrs[TIPC_NLA_LINK_NAME])
2244 return -EINVAL;
2245
2246 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2247
2248 if (strcmp(link_name, tipc_bclink_name) == 0) {
2249 err = tipc_bclink_reset_stats(net);
2250 if (err)
2251 return err;
2252 return 0;
2253 }
2254
2255 node = tipc_link_find_owner(net, link_name, &bearer_id);
2256 if (!node)
2257 return -EINVAL;
2258
2259 tipc_node_lock(node);
2260
2261 link = node->links[bearer_id];
2262 if (!link) {
2263 tipc_node_unlock(node);
2264 return -EINVAL;
2265 }
2266
2267 link_reset_statistics(link);
2268
2269 tipc_node_unlock(node);
2270
2271 return 0;
2272 }