tipc: move message validation function to msg.c
1 /*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "link.h"
39 #include "bcast.h"
40 #include "socket.h"
41 #include "name_distr.h"
42 #include "discover.h"
43 #include "netlink.h"
44
45 #include <linux/pkt_sched.h>
46
47 /*
48 * Error message prefixes
49 */
50 static const char *link_co_err = "Link changeover error, ";
51 static const char *link_rst_msg = "Resetting link ";
52 static const char *link_unk_evt = "Unknown link event ";
53
54 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
55 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
56 [TIPC_NLA_LINK_NAME] = {
57 .type = NLA_STRING,
58 .len = TIPC_MAX_LINK_NAME
59 },
60 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
61 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
62 [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
63 [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
64 [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
65 [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
66 [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
67 [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
68 };
69
70 /* Properties valid for media, bearer and link */
71 static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
72 [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
73 [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
74 [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
75 [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
76 };
77
78 /*
79 * Out-of-range value for link session numbers
80 */
81 #define INVALID_SESSION 0x10000
82
83 /*
84 * Link state events:
85 */
86 #define STARTING_EVT 856384768 /* link processing trigger */
87 #define TRAFFIC_MSG_EVT 560815u /* traffic message received */
88 #define TIMEOUT_EVT 560817u /* link timer expired */
89
90 /*
91 * The following two 'message types' are really just implementation
92 * data conveniently stored in the message header.
93 * They must not be considered part of the protocol.
94 */
95 #define OPEN_MSG 0
96 #define CLOSED_MSG 1
97
98 /*
99 * State value stored in 'exp_msg_count'
100 */
101 #define START_CHANGEOVER 100000u
102
103 static void link_handle_out_of_seq_msg(struct tipc_link *link,
104 struct sk_buff *skb);
105 static void tipc_link_proto_rcv(struct tipc_link *link,
106 struct sk_buff *skb);
107 static int tipc_link_tunnel_rcv(struct tipc_node *node,
108 struct sk_buff **skb);
109 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
110 static void link_state_event(struct tipc_link *l_ptr, u32 event);
111 static void link_reset_statistics(struct tipc_link *l_ptr);
112 static void link_print(struct tipc_link *l_ptr, const char *str);
113 static void tipc_link_sync_xmit(struct tipc_link *l);
114 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
115 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
116 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
117
118 /*
119 * Simple link routines
120 */
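/* align(): round a size up to the next 4-byte TIPC word boundary,
 * e.g. align(5) == 8 and align(8) == 8.
 */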
121 static unsigned int align(unsigned int i)
122 {
123 return (i + 3) & ~3u;
124 }
125
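/* Link reference counting: the link structure is freed only when the
 * last reference is dropped. Note that an armed timer holds its own
 * reference, taken in link_set_timer() and released in link_timeout().
 */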
126 static void tipc_link_release(struct kref *kref)
127 {
128 kfree(container_of(kref, struct tipc_link, ref));
129 }
130
131 static void tipc_link_get(struct tipc_link *l_ptr)
132 {
133 kref_get(&l_ptr->ref);
134 }
135
136 static void tipc_link_put(struct tipc_link *l_ptr)
137 {
138 kref_put(&l_ptr->ref, tipc_link_release);
139 }
140
141 static void link_init_max_pkt(struct tipc_link *l_ptr)
142 {
143 struct tipc_node *node = l_ptr->owner;
144 struct tipc_net *tn = net_generic(node->net, tipc_net_id);
145 struct tipc_bearer *b_ptr;
146 u32 max_pkt;
147
148 rcu_read_lock();
149 b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
150 if (!b_ptr) {
151 rcu_read_unlock();
152 return;
153 }
154 max_pkt = (b_ptr->mtu & ~3);
155 rcu_read_unlock();
156
157 if (max_pkt > MAX_MSG_SIZE)
158 max_pkt = MAX_MSG_SIZE;
159
160 l_ptr->max_pkt_target = max_pkt;
161 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
162 l_ptr->max_pkt = l_ptr->max_pkt_target;
163 else
164 l_ptr->max_pkt = MAX_PKT_DEFAULT;
165
166 l_ptr->max_pkt_probes = 0;
167 }
168
169 /*
170 * Simple non-static link routines (i.e. referenced outside this file)
171 */
172 int tipc_link_is_up(struct tipc_link *l_ptr)
173 {
174 if (!l_ptr)
175 return 0;
176 return link_working_working(l_ptr) || link_working_unknown(l_ptr);
177 }
178
179 int tipc_link_is_active(struct tipc_link *l_ptr)
180 {
181 return (l_ptr->owner->active_links[0] == l_ptr) ||
182 (l_ptr->owner->active_links[1] == l_ptr);
183 }
184
185 /**
186 * link_timeout - handle expiration of link timer
187 * @l_ptr: pointer to link
188 */
189 static void link_timeout(unsigned long data)
190 {
191 struct tipc_link *l_ptr = (struct tipc_link *)data;
192 struct sk_buff *skb;
193
194 tipc_node_lock(l_ptr->owner);
195
196 /* update counters used in statistical profiling of send traffic */
197 l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
198 l_ptr->stats.queue_sz_counts++;
199
200 skb = skb_peek(&l_ptr->outqueue);
201 if (skb) {
202 struct tipc_msg *msg = buf_msg(skb);
203 u32 length = msg_size(msg);
204
205 if ((msg_user(msg) == MSG_FRAGMENTER) &&
206 (msg_type(msg) == FIRST_FRAGMENT)) {
207 length = msg_size(msg_get_wrapped(msg));
208 }
209 if (length) {
210 l_ptr->stats.msg_lengths_total += length;
211 l_ptr->stats.msg_length_counts++;
212 if (length <= 64)
213 l_ptr->stats.msg_length_profile[0]++;
214 else if (length <= 256)
215 l_ptr->stats.msg_length_profile[1]++;
216 else if (length <= 1024)
217 l_ptr->stats.msg_length_profile[2]++;
218 else if (length <= 4096)
219 l_ptr->stats.msg_length_profile[3]++;
220 else if (length <= 16384)
221 l_ptr->stats.msg_length_profile[4]++;
222 else if (length <= 32768)
223 l_ptr->stats.msg_length_profile[5]++;
224 else
225 l_ptr->stats.msg_length_profile[6]++;
226 }
227 }
228
229 /* do all other link processing performed on a periodic basis */
230 link_state_event(l_ptr, TIMEOUT_EVT);
231
232 if (l_ptr->next_out)
233 tipc_link_push_packets(l_ptr);
234
235 tipc_node_unlock(l_ptr->owner);
236 tipc_link_put(l_ptr);
237 }
238
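/* Arm the link timer. mod_timer() returns 0 when the timer was not
 * already pending, i.e. exactly when a new reference must be taken
 * for link_timeout() to drop on expiry.
 */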
239 static void link_set_timer(struct tipc_link *link, unsigned long time)
240 {
241 if (!mod_timer(&link->timer, jiffies + time))
242 tipc_link_get(link);
243 }
244
245 /**
246 * tipc_link_create - create a new link
247 * @n_ptr: pointer to associated node
248 * @b_ptr: pointer to associated bearer
249 * @media_addr: media address to use when sending messages over link
250 *
251 * Returns pointer to link.
252 */
253 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
254 struct tipc_bearer *b_ptr,
255 const struct tipc_media_addr *media_addr)
256 {
257 struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
258 struct tipc_link *l_ptr;
259 struct tipc_msg *msg;
260 char *if_name;
261 char addr_string[16];
262 u32 peer = n_ptr->addr;
263
264 if (n_ptr->link_cnt >= MAX_BEARERS) {
265 tipc_addr_string_fill(addr_string, n_ptr->addr);
266 pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
267 n_ptr->link_cnt, addr_string, MAX_BEARERS);
268 return NULL;
269 }
270
271 if (n_ptr->links[b_ptr->identity]) {
272 tipc_addr_string_fill(addr_string, n_ptr->addr);
273 pr_err("Attempt to establish second link on <%s> to %s\n",
274 b_ptr->name, addr_string);
275 return NULL;
276 }
277
278 l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
279 if (!l_ptr) {
280 pr_warn("Link creation failed, no memory\n");
281 return NULL;
282 }
283 kref_init(&l_ptr->ref);
284 l_ptr->addr = peer;
285 if_name = strchr(b_ptr->name, ':') + 1;
286 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
287 tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
288 tipc_node(tn->own_addr),
289 if_name,
290 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
291 /* note: peer i/f name is updated by reset/activate message */
292 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
293 l_ptr->owner = n_ptr;
294 l_ptr->checkpoint = 1;
295 l_ptr->peer_session = INVALID_SESSION;
296 l_ptr->bearer_id = b_ptr->identity;
297 link_set_supervision_props(l_ptr, b_ptr->tolerance);
298 l_ptr->state = RESET_UNKNOWN;
299
300 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
301 msg = l_ptr->pmsg;
302 tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
303 l_ptr->addr);
304 msg_set_size(msg, sizeof(l_ptr->proto_msg));
305 msg_set_session(msg, (tn->random & 0xffff));
306 msg_set_bearer_id(msg, b_ptr->identity);
307 strcpy((char *)msg_data(msg), if_name);
308
309 l_ptr->priority = b_ptr->priority;
310 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
311
312 l_ptr->net_plane = b_ptr->net_plane;
313 link_init_max_pkt(l_ptr);
314
315 l_ptr->next_out_no = 1;
316 __skb_queue_head_init(&l_ptr->outqueue);
317 __skb_queue_head_init(&l_ptr->deferred_queue);
318 skb_queue_head_init(&l_ptr->wakeupq);
319 skb_queue_head_init(&l_ptr->inputq);
320 skb_queue_head_init(&l_ptr->namedq);
321 link_reset_statistics(l_ptr);
322 tipc_node_attach_link(n_ptr, l_ptr);
323 setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
324 link_state_event(l_ptr, STARTING_EVT);
325
326 return l_ptr;
327 }
328
329 /**
330 * link_delete - Conditional deletion of link.
331 * If timer still running, real delete is done when it expires
332 * @link: link to be deleted
333 */
334 void tipc_link_delete(struct tipc_link *link)
335 {
336 tipc_link_reset_fragments(link);
337 tipc_node_detach_link(link->owner, link);
338 tipc_link_put(link);
339 }
340
341 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
342 bool shutting_down)
343 {
344 struct tipc_net *tn = net_generic(net, tipc_net_id);
345 struct tipc_link *link;
346 struct tipc_node *node;
347 bool del_link;
348
349 rcu_read_lock();
350 list_for_each_entry_rcu(node, &tn->node_list, list) {
351 tipc_node_lock(node);
352 link = node->links[bearer_id];
353 if (!link) {
354 tipc_node_unlock(node);
355 continue;
356 }
357 del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
358 tipc_link_reset(link);
359 if (del_timer(&link->timer))
360 tipc_link_put(link);
361 link->flags |= LINK_STOPPED;
362 /* Delete link now, or when failover is finished: */
363 if (shutting_down || !tipc_node_is_up(node) || del_link)
364 tipc_link_delete(link);
365 tipc_node_unlock(node);
366 }
367 rcu_read_unlock();
368 }
369
370 /**
371 * link_schedule_user - schedule user for wakeup after congestion
372 * @link: congested link
373 * @oport: sending port
374 * @chain_sz: size of buffer chain that was attempted sent
375 * @imp: importance of message attempted sent
376 * Create pseudo msg to send back to user when congestion abates
377 */
378 static bool link_schedule_user(struct tipc_link *link, u32 oport,
379 uint chain_sz, uint imp)
380 {
381 struct sk_buff *buf;
382
383 buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
384 link_own_addr(link), link_own_addr(link),
385 oport, 0, 0);
386 if (!buf)
387 return false;
388 TIPC_SKB_CB(buf)->chain_sz = chain_sz;
389 TIPC_SKB_CB(buf)->chain_imp = imp;
390 skb_queue_tail(&link->wakeupq, buf);
391 link->stats.link_congs++;
392 return true;
393 }
394
395 /**
396 * link_prepare_wakeup - prepare users for wakeup after congestion
397 * @link: congested link
398 * Move a number of waiting users, as permitted by available space in
399 * the send queue, from link wait queue to node wait queue for wakeup
400 */
401 void link_prepare_wakeup(struct tipc_link *link)
402 {
403 uint pend_qsz = skb_queue_len(&link->outqueue);
404 struct sk_buff *skb, *tmp;
405
406 skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
407 if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
408 break;
409 pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
410 skb_unlink(skb, &link->wakeupq);
411 skb_queue_tail(&link->inputq, skb);
412 link->owner->inputq = &link->inputq;
413 link->owner->action_flags |= TIPC_MSG_EVT;
414 }
415 }
416
417 /**
418 * tipc_link_reset_fragments - purge link's inbound message fragments queue
419 * @l_ptr: pointer to link
420 */
421 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
422 {
423 kfree_skb(l_ptr->reasm_buf);
424 l_ptr->reasm_buf = NULL;
425 }
426
427 /**
428 * tipc_link_purge_queues - purge all pkt queues associated with link
429 * @l_ptr: pointer to link
430 */
431 void tipc_link_purge_queues(struct tipc_link *l_ptr)
432 {
433 __skb_queue_purge(&l_ptr->deferred_queue);
434 __skb_queue_purge(&l_ptr->outqueue);
435 tipc_link_reset_fragments(l_ptr);
436 }
437
438 void tipc_link_reset(struct tipc_link *l_ptr)
439 {
440 u32 prev_state = l_ptr->state;
441 u32 checkpoint = l_ptr->next_in_no;
442 int was_active_link = tipc_link_is_active(l_ptr);
443 struct tipc_node *owner = l_ptr->owner;
444
445 msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
446
447 /* Link is down, accept any session */
448 l_ptr->peer_session = INVALID_SESSION;
449
450 /* Prepare for max packet size negotiation */
451 link_init_max_pkt(l_ptr);
452
453 l_ptr->state = RESET_UNKNOWN;
454
455 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
456 return;
457
458 tipc_node_link_down(l_ptr->owner, l_ptr);
459 tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
460
461 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
462 l_ptr->reset_checkpoint = checkpoint;
463 l_ptr->exp_msg_count = START_CHANGEOVER;
464 }
465
466 /* Clean up all queues, except inputq: */
467 __skb_queue_purge(&l_ptr->outqueue);
468 __skb_queue_purge(&l_ptr->deferred_queue);
469 if (!owner->inputq)
470 owner->inputq = &l_ptr->inputq;
471 skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
472 if (!skb_queue_empty(owner->inputq))
473 owner->action_flags |= TIPC_MSG_EVT;
474 l_ptr->next_out = NULL;
475 l_ptr->unacked_window = 0;
476 l_ptr->checkpoint = 1;
477 l_ptr->next_out_no = 1;
478 l_ptr->fsm_msg_cnt = 0;
479 l_ptr->stale_count = 0;
480 link_reset_statistics(l_ptr);
481 }
482
483 void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
484 {
485 struct tipc_net *tn = net_generic(net, tipc_net_id);
486 struct tipc_link *l_ptr;
487 struct tipc_node *n_ptr;
488
489 rcu_read_lock();
490 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
491 tipc_node_lock(n_ptr);
492 l_ptr = n_ptr->links[bearer_id];
493 if (l_ptr)
494 tipc_link_reset(l_ptr);
495 tipc_node_unlock(n_ptr);
496 }
497 rcu_read_unlock();
498 }
499
500 static void link_activate(struct tipc_link *link)
501 {
502 struct tipc_node *node = link->owner;
503
504 link->next_in_no = 1;
505 link->stats.recv_info = 1;
506 tipc_node_link_up(node, link);
507 tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
508 }
509
510 /**
511 * link_state_event - link finite state machine
512 * @l_ptr: pointer to link
513 * @event: state machine event to process
514 */
515 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
516 {
517 struct tipc_link *other;
518 unsigned long cont_intv = l_ptr->cont_intv;
519
520 if (l_ptr->flags & LINK_STOPPED)
521 return;
522
523 if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
524 return; /* Not yet. */
525
526 /* Check whether changeover is going on */
527 if (l_ptr->exp_msg_count) {
528 if (event == TIMEOUT_EVT)
529 link_set_timer(l_ptr, cont_intv);
530 return;
531 }
532
533 switch (l_ptr->state) {
534 case WORKING_WORKING:
535 switch (event) {
536 case TRAFFIC_MSG_EVT:
537 case ACTIVATE_MSG:
538 break;
539 case TIMEOUT_EVT:
540 if (l_ptr->next_in_no != l_ptr->checkpoint) {
541 l_ptr->checkpoint = l_ptr->next_in_no;
542 if (tipc_bclink_acks_missing(l_ptr->owner)) {
543 tipc_link_proto_xmit(l_ptr, STATE_MSG,
544 0, 0, 0, 0, 0);
545 l_ptr->fsm_msg_cnt++;
546 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
547 tipc_link_proto_xmit(l_ptr, STATE_MSG,
548 1, 0, 0, 0, 0);
549 l_ptr->fsm_msg_cnt++;
550 }
551 link_set_timer(l_ptr, cont_intv);
552 break;
553 }
554 l_ptr->state = WORKING_UNKNOWN;
555 l_ptr->fsm_msg_cnt = 0;
556 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
557 l_ptr->fsm_msg_cnt++;
558 link_set_timer(l_ptr, cont_intv / 4);
559 break;
560 case RESET_MSG:
561 pr_debug("%s<%s>, requested by peer\n",
562 link_rst_msg, l_ptr->name);
563 tipc_link_reset(l_ptr);
564 l_ptr->state = RESET_RESET;
565 l_ptr->fsm_msg_cnt = 0;
566 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
567 0, 0, 0, 0, 0);
568 l_ptr->fsm_msg_cnt++;
569 link_set_timer(l_ptr, cont_intv);
570 break;
571 default:
572 pr_debug("%s%u in WW state\n", link_unk_evt, event);
573 }
574 break;
575 case WORKING_UNKNOWN:
576 switch (event) {
577 case TRAFFIC_MSG_EVT:
578 case ACTIVATE_MSG:
579 l_ptr->state = WORKING_WORKING;
580 l_ptr->fsm_msg_cnt = 0;
581 link_set_timer(l_ptr, cont_intv);
582 break;
583 case RESET_MSG:
584 pr_debug("%s<%s>, requested by peer while probing\n",
585 link_rst_msg, l_ptr->name);
586 tipc_link_reset(l_ptr);
587 l_ptr->state = RESET_RESET;
588 l_ptr->fsm_msg_cnt = 0;
589 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
590 0, 0, 0, 0, 0);
591 l_ptr->fsm_msg_cnt++;
592 link_set_timer(l_ptr, cont_intv);
593 break;
594 case TIMEOUT_EVT:
595 if (l_ptr->next_in_no != l_ptr->checkpoint) {
596 l_ptr->state = WORKING_WORKING;
597 l_ptr->fsm_msg_cnt = 0;
598 l_ptr->checkpoint = l_ptr->next_in_no;
599 if (tipc_bclink_acks_missing(l_ptr->owner)) {
600 tipc_link_proto_xmit(l_ptr, STATE_MSG,
601 0, 0, 0, 0, 0);
602 l_ptr->fsm_msg_cnt++;
603 }
604 link_set_timer(l_ptr, cont_intv);
605 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
606 tipc_link_proto_xmit(l_ptr, STATE_MSG,
607 1, 0, 0, 0, 0);
608 l_ptr->fsm_msg_cnt++;
609 link_set_timer(l_ptr, cont_intv / 4);
610 } else { /* Link has failed */
611 pr_debug("%s<%s>, peer not responding\n",
612 link_rst_msg, l_ptr->name);
613 tipc_link_reset(l_ptr);
614 l_ptr->state = RESET_UNKNOWN;
615 l_ptr->fsm_msg_cnt = 0;
616 tipc_link_proto_xmit(l_ptr, RESET_MSG,
617 0, 0, 0, 0, 0);
618 l_ptr->fsm_msg_cnt++;
619 link_set_timer(l_ptr, cont_intv);
620 }
621 break;
622 default:
623 pr_err("%s%u in WU state\n", link_unk_evt, event);
624 }
625 break;
626 case RESET_UNKNOWN:
627 switch (event) {
628 case TRAFFIC_MSG_EVT:
629 break;
630 case ACTIVATE_MSG:
631 other = l_ptr->owner->active_links[0];
632 if (other && link_working_unknown(other))
633 break;
634 l_ptr->state = WORKING_WORKING;
635 l_ptr->fsm_msg_cnt = 0;
636 link_activate(l_ptr);
637 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
638 l_ptr->fsm_msg_cnt++;
639 if (l_ptr->owner->working_links == 1)
640 tipc_link_sync_xmit(l_ptr);
641 link_set_timer(l_ptr, cont_intv);
642 break;
643 case RESET_MSG:
644 l_ptr->state = RESET_RESET;
645 l_ptr->fsm_msg_cnt = 0;
646 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
647 1, 0, 0, 0, 0);
648 l_ptr->fsm_msg_cnt++;
649 link_set_timer(l_ptr, cont_intv);
650 break;
651 case STARTING_EVT:
652 l_ptr->flags |= LINK_STARTED;
653 l_ptr->fsm_msg_cnt++;
654 link_set_timer(l_ptr, cont_intv);
655 break;
656 case TIMEOUT_EVT:
657 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
658 l_ptr->fsm_msg_cnt++;
659 link_set_timer(l_ptr, cont_intv);
660 break;
661 default:
662 pr_err("%s%u in RU state\n", link_unk_evt, event);
663 }
664 break;
665 case RESET_RESET:
666 switch (event) {
667 case TRAFFIC_MSG_EVT:
668 case ACTIVATE_MSG:
669 other = l_ptr->owner->active_links[0];
670 if (other && link_working_unknown(other))
671 break;
672 l_ptr->state = WORKING_WORKING;
673 l_ptr->fsm_msg_cnt = 0;
674 link_activate(l_ptr);
675 tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
676 l_ptr->fsm_msg_cnt++;
677 if (l_ptr->owner->working_links == 1)
678 tipc_link_sync_xmit(l_ptr);
679 link_set_timer(l_ptr, cont_intv);
680 break;
681 case RESET_MSG:
682 break;
683 case TIMEOUT_EVT:
684 tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
685 0, 0, 0, 0, 0);
686 l_ptr->fsm_msg_cnt++;
687 link_set_timer(l_ptr, cont_intv);
688 break;
689 default:
690 pr_err("%s%u in RR state\n", link_unk_evt, event);
691 }
692 break;
693 default:
694 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
695 }
696 }
697
698 /* tipc_link_cong: determine return value and how to treat the
699 * sent buffer during link congestion.
700 * - For plain, errorless user data messages we keep the buffer and
701 * return -ELINKCONG.
702 * - For all other messages we discard the buffer and return -EHOSTUNREACH.
703 * - For TIPC internal messages we also reset the link.
704 */
705 static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
706 {
707 struct sk_buff *skb = skb_peek(list);
708 struct tipc_msg *msg = buf_msg(skb);
709 uint imp = tipc_msg_tot_importance(msg);
710 u32 oport = msg_tot_origport(msg);
711
712 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
713 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
714 tipc_link_reset(link);
715 goto drop;
716 }
717 if (unlikely(msg_errcode(msg)))
718 goto drop;
719 if (unlikely(msg_reroute_cnt(msg)))
720 goto drop;
721 if (TIPC_SKB_CB(skb)->wakeup_pending)
722 return -ELINKCONG;
723 if (link_schedule_user(link, oport, skb_queue_len(list), imp))
724 return -ELINKCONG;
725 drop:
726 __skb_queue_purge(list);
727 return -EHOSTUNREACH;
728 }
729
730 /**
731 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
732 * @link: link to use
733 * @list: chain of buffers containing message
734 *
735 * Consumes the buffer chain, except when returning -ELINKCONG
736 * Returns 0 on success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
737 * user data messages) or -EHOSTUNREACH (all other messages/senders)
738 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
739 * to act on the return value, since they may need to do more send attempts.
740 */
741 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
742 struct sk_buff_head *list)
743 {
744 struct tipc_msg *msg = buf_msg(skb_peek(list));
745 uint psz = msg_size(msg);
746 uint sndlim = link->queue_limit[0];
747 uint imp = tipc_msg_tot_importance(msg);
748 uint mtu = link->max_pkt;
749 uint ack = mod(link->next_in_no - 1);
750 uint seqno = link->next_out_no;
751 uint bc_last_in = link->owner->bclink.last_in;
752 struct tipc_media_addr *addr = &link->media_addr;
753 struct sk_buff_head *outqueue = &link->outqueue;
754 struct sk_buff *skb, *tmp;
755
756 /* Match queue limits against msg importance: */
757 if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
758 return tipc_link_cong(link, list);
759
760 /* Has valid packet limit been used? */
761 if (unlikely(psz > mtu)) {
762 __skb_queue_purge(list);
763 return -EMSGSIZE;
764 }
765
766 /* Prepare each packet for sending, and add to outqueue: */
767 skb_queue_walk_safe(list, skb, tmp) {
768 __skb_unlink(skb, list);
769 msg = buf_msg(skb);
770 msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
771 msg_set_bcast_ack(msg, bc_last_in);
772
773 if (skb_queue_len(outqueue) < sndlim) {
774 __skb_queue_tail(outqueue, skb);
775 tipc_bearer_send(net, link->bearer_id,
776 skb, addr);
777 link->next_out = NULL;
778 link->unacked_window = 0;
779 } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
780 link->stats.sent_bundled++;
781 continue;
782 } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
783 link->addr)) {
784 link->stats.sent_bundled++;
785 link->stats.sent_bundles++;
786 if (!link->next_out)
787 link->next_out = skb_peek_tail(outqueue);
788 } else {
789 __skb_queue_tail(outqueue, skb);
790 if (!link->next_out)
791 link->next_out = skb;
792 }
793 seqno++;
794 }
795 link->next_out_no = seqno;
796 return 0;
797 }
798
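/* skb2list(): wrap a single buffer in an initialized queue head so
 * that single-skb sends can reuse the list-based transmit path.
 */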
799 static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
800 {
801 skb_queue_head_init(list);
802 __skb_queue_tail(list, skb);
803 }
804
805 static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
806 {
807 struct sk_buff_head head;
808
809 skb2list(skb, &head);
810 return __tipc_link_xmit(link->owner->net, link, &head);
811 }
812
813 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
814 u32 selector)
815 {
816 struct sk_buff_head head;
817
818 skb2list(skb, &head);
819 return tipc_link_xmit(net, &head, dnode, selector);
820 }
821
822 /**
823 * tipc_link_xmit() is the general link level function for message sending
824 * @net: the applicable net namespace
825 * @list: chain of buffers containing message
826 * @dsz: amount of user data to be sent
827 * @dnode: address of destination node
828 * @selector: a number used for deterministic link selection
829 * Consumes the buffer chain, except when returning -ELINKCONG
830 * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH or -EMSGSIZE
831 */
832 int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
833 u32 selector)
834 {
835 struct tipc_link *link = NULL;
836 struct tipc_node *node;
837 int rc = -EHOSTUNREACH;
838
839 node = tipc_node_find(net, dnode);
840 if (node) {
841 tipc_node_lock(node);
842 link = node->active_links[selector & 1];
843 if (link)
844 rc = __tipc_link_xmit(net, link, list);
845 tipc_node_unlock(node);
846 }
847 if (link)
848 return rc;
849
850 if (likely(in_own_node(net, dnode)))
851 return tipc_sk_rcv(net, list);
852
853 __skb_queue_purge(list);
854 return rc;
855 }
856
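/* Illustrative caller pattern, not part of this file: a blocking send
 * built on tipc_link_xmit() would typically retry only on -ELINKCONG,
 * once the SOCK_WAKEUP message queued by link_schedule_user() arrives.
 * A sketch of the flow (names and loop are assumptions, not the actual
 * socket code):
 *
 *	do {
 *		rc = tipc_link_xmit(net, &list, dnode, selector);
 *		if (rc != -ELINKCONG)
 *			break;	(chain consumed on 0, purged on errno)
 *		wait for SOCK_WAKEUP - the chain was retained, so the
 *		same list can simply be sent again;
 *	} while (!signal_pending(current));
 */
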
857 /*
858 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
859 *
860 * Give a newly added peer node the sequence number where it should
861 * start receiving and acking broadcast packets.
862 *
863 * Called with node locked
864 */
865 static void tipc_link_sync_xmit(struct tipc_link *link)
866 {
867 struct sk_buff *skb;
868 struct tipc_msg *msg;
869
870 skb = tipc_buf_acquire(INT_H_SIZE);
871 if (!skb)
872 return;
873
874 msg = buf_msg(skb);
875 tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
876 INT_H_SIZE, link->addr);
877 msg_set_last_bcast(msg, link->owner->bclink.acked);
878 __tipc_link_xmit_skb(link, skb);
879 }
880
881 /*
882 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
883 * Receive the sequence number where we should start receiving and
884 * acking broadcast packets from a newly added peer node, and open
885 * up for reception of such packets.
886 *
887 * Called with node locked
888 */
889 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
890 {
891 struct tipc_msg *msg = buf_msg(buf);
892
893 n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
894 n->bclink.recv_permitted = true;
895 kfree_skb(buf);
896 }
897
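/* Return the buffer following @skb in @list, or NULL if @skb is the
 * last buffer in the queue.
 */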
898 struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
899 const struct sk_buff *skb)
900 {
901 if (skb_queue_is_last(list, skb))
902 return NULL;
903 return skb->next;
904 }
905
906 /*
907 * tipc_link_push_packets - push unsent packets to bearer
908 *
909 * Push out the unsent messages of a link where congestion
910 * has abated.
911 *
912 * Called with node locked.
913 */
914 void tipc_link_push_packets(struct tipc_link *l_ptr)
915 {
916 struct sk_buff_head *outqueue = &l_ptr->outqueue;
917 struct sk_buff *skb = l_ptr->next_out;
918 struct tipc_msg *msg;
919 u32 next, first;
920
921 skb_queue_walk_from(outqueue, skb) {
922 msg = buf_msg(skb);
923 next = msg_seqno(msg);
924 first = buf_seqno(skb_peek(outqueue));
925
926 if (mod(next - first) < l_ptr->queue_limit[0]) {
927 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
928 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
929 if (msg_user(msg) == MSG_BUNDLER)
930 TIPC_SKB_CB(skb)->bundling = false;
931 tipc_bearer_send(l_ptr->owner->net,
932 l_ptr->bearer_id, skb,
933 &l_ptr->media_addr);
934 l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
935 } else {
936 break;
937 }
938 }
939 }
940
941 void tipc_link_reset_all(struct tipc_node *node)
942 {
943 char addr_string[16];
944 u32 i;
945
946 tipc_node_lock(node);
947
948 pr_warn("Resetting all links to %s\n",
949 tipc_addr_string_fill(addr_string, node->addr));
950
951 for (i = 0; i < MAX_BEARERS; i++) {
952 if (node->links[i]) {
953 link_print(node->links[i], "Resetting link\n");
954 tipc_link_reset(node->links[i]);
955 }
956 }
957
958 tipc_node_unlock(node);
959 }
960
961 static void link_retransmit_failure(struct tipc_link *l_ptr,
962 struct sk_buff *buf)
963 {
964 struct tipc_msg *msg = buf_msg(buf);
965 struct net *net = l_ptr->owner->net;
966
967 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
968
969 if (l_ptr->addr) {
970 /* Handle failure on standard link */
971 link_print(l_ptr, "Resetting link\n");
972 tipc_link_reset(l_ptr);
973
974 } else {
975 /* Handle failure on broadcast link */
976 struct tipc_node *n_ptr;
977 char addr_string[16];
978
979 pr_info("Msg seq number: %u, ", msg_seqno(msg));
980 pr_cont("Outstanding acks: %lu\n",
981 (unsigned long) TIPC_SKB_CB(buf)->handle);
982
983 n_ptr = tipc_bclink_retransmit_to(net);
984 tipc_node_lock(n_ptr);
985
986 tipc_addr_string_fill(addr_string, n_ptr->addr);
987 pr_info("Broadcast link info for %s\n", addr_string);
988 pr_info("Reception permitted: %d, Acked: %u\n",
989 n_ptr->bclink.recv_permitted,
990 n_ptr->bclink.acked);
991 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
992 n_ptr->bclink.last_in,
993 n_ptr->bclink.oos_state,
994 n_ptr->bclink.last_sent);
995
996 tipc_node_unlock(n_ptr);
997
998 tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
999 l_ptr->stale_count = 0;
1000 }
1001 }
1002
1003 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
1004 u32 retransmits)
1005 {
1006 struct tipc_msg *msg;
1007
1008 if (!skb)
1009 return;
1010
1011 msg = buf_msg(skb);
1012
1013 /* Detect repeated retransmit failures */
1014 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1015 if (++l_ptr->stale_count > 100) {
1016 link_retransmit_failure(l_ptr, skb);
1017 return;
1018 }
1019 } else {
1020 l_ptr->last_retransmitted = msg_seqno(msg);
1021 l_ptr->stale_count = 1;
1022 }
1023
1024 skb_queue_walk_from(&l_ptr->outqueue, skb) {
1025 if (!retransmits || skb == l_ptr->next_out)
1026 break;
1027 msg = buf_msg(skb);
1028 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1029 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1030 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
1031 &l_ptr->media_addr);
1032 retransmits--;
1033 l_ptr->stats.retransmitted++;
1034 }
1035 }
1036
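/* link_retrieve_defq(): splice the whole deferred queue onto @list if
 * its head is the next in-sequence packet the link expects.
 */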
1037 static void link_retrieve_defq(struct tipc_link *link,
1038 struct sk_buff_head *list)
1039 {
1040 u32 seq_no;
1041
1042 if (skb_queue_empty(&link->deferred_queue))
1043 return;
1044
1045 seq_no = buf_seqno(skb_peek(&link->deferred_queue));
1046 if (seq_no == mod(link->next_in_no))
1047 skb_queue_splice_tail_init(&link->deferred_queue, list);
1048 }
1049
1050 /**
1051 * tipc_rcv - process TIPC packets/messages arriving from off-node
1052 * @net: the applicable net namespace
1053 * @skb: TIPC packet
1054 * @b_ptr: pointer to bearer on which the message arrived
1055 *
1056 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1057 * structure (i.e. cannot be NULL), but bearer can be inactive.
1058 */
1059 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1060 {
1061 struct tipc_net *tn = net_generic(net, tipc_net_id);
1062 struct sk_buff_head head;
1063 struct tipc_node *n_ptr;
1064 struct tipc_link *l_ptr;
1065 struct sk_buff *skb1, *tmp;
1066 struct tipc_msg *msg;
1067 u32 seq_no;
1068 u32 ackd;
1069 u32 released;
1070
1071 skb2list(skb, &head);
1072
1073 while ((skb = __skb_dequeue(&head))) {
1074 /* Ensure message is well-formed */
1075 if (unlikely(!tipc_msg_validate(skb)))
1076 goto discard;
1077
1078 /* Ensure message data is a single contiguous unit */
1079 if (unlikely(skb_linearize(skb)))
1080 goto discard;
1081
1082 /* Handle arrival of a non-unicast link message */
1083 msg = buf_msg(skb);
1084
1085 if (unlikely(msg_non_seq(msg))) {
1086 if (msg_user(msg) == LINK_CONFIG)
1087 tipc_disc_rcv(net, skb, b_ptr);
1088 else
1089 tipc_bclink_rcv(net, skb);
1090 continue;
1091 }
1092
1093 /* Discard unicast link messages destined for another node */
1094 if (unlikely(!msg_short(msg) &&
1095 (msg_destnode(msg) != tn->own_addr)))
1096 goto discard;
1097
1098 /* Locate neighboring node that sent message */
1099 n_ptr = tipc_node_find(net, msg_prevnode(msg));
1100 if (unlikely(!n_ptr))
1101 goto discard;
1102 tipc_node_lock(n_ptr);
1103
1104 /* Locate unicast link endpoint that should handle message */
1105 l_ptr = n_ptr->links[b_ptr->identity];
1106 if (unlikely(!l_ptr))
1107 goto unlock;
1108
1109 /* Verify that communication with node is currently allowed */
1110 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1111 msg_user(msg) == LINK_PROTOCOL &&
1112 (msg_type(msg) == RESET_MSG ||
1113 msg_type(msg) == ACTIVATE_MSG) &&
1114 !msg_redundant_link(msg))
1115 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1116
1117 if (tipc_node_blocked(n_ptr))
1118 goto unlock;
1119
1120 /* Validate message sequence number info */
1121 seq_no = msg_seqno(msg);
1122 ackd = msg_ack(msg);
1123
1124 /* Release acked messages */
1125 if (n_ptr->bclink.recv_permitted)
1126 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1127
1128 released = 0;
1129 skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
1130 if (skb1 == l_ptr->next_out ||
1131 more(buf_seqno(skb1), ackd))
1132 break;
1133 __skb_unlink(skb1, &l_ptr->outqueue);
1134 kfree_skb(skb1);
1135 released = 1;
1136 }
1137
1138 /* Try sending any messages link endpoint has pending */
1139 if (unlikely(l_ptr->next_out))
1140 tipc_link_push_packets(l_ptr);
1141
1142 if (released && !skb_queue_empty(&l_ptr->wakeupq))
1143 link_prepare_wakeup(l_ptr);
1144
1145 /* Process the incoming packet */
1146 if (unlikely(!link_working_working(l_ptr))) {
1147 if (msg_user(msg) == LINK_PROTOCOL) {
1148 tipc_link_proto_rcv(l_ptr, skb);
1149 link_retrieve_defq(l_ptr, &head);
1150 skb = NULL;
1151 goto unlock;
1152 }
1153
1154 /* Traffic message. Conditionally activate link */
1155 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1156
1157 if (link_working_working(l_ptr)) {
1158 /* Re-insert buffer in front of queue */
1159 __skb_queue_head(&head, skb);
1160 skb = NULL;
1161 goto unlock;
1162 }
1163 goto unlock;
1164 }
1165
1166 /* Link is now in state WORKING_WORKING */
1167 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1168 link_handle_out_of_seq_msg(l_ptr, skb);
1169 link_retrieve_defq(l_ptr, &head);
1170 skb = NULL;
1171 goto unlock;
1172 }
1173 l_ptr->next_in_no++;
1174 if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
1175 link_retrieve_defq(l_ptr, &head);
1176
1177 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1178 l_ptr->stats.sent_acks++;
1179 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1180 }
1181 tipc_link_input(l_ptr, skb);
1182 skb = NULL;
1183 unlock:
1184 tipc_node_unlock(n_ptr);
1185 discard:
1186 if (unlikely(skb))
1187 kfree_skb(skb);
1188 }
1189 }
1190
1191 /* tipc_data_input - deliver data and name distr msgs to upper layer
1192 *
1193 * Consumes buffer if message is of right type
1194 * Node lock must be held
1195 */
1196 static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1197 {
1198 struct tipc_node *node = link->owner;
1199 struct tipc_msg *msg = buf_msg(skb);
1200 u32 dport = msg_destport(msg);
1201
1202 switch (msg_user(msg)) {
1203 case TIPC_LOW_IMPORTANCE:
1204 case TIPC_MEDIUM_IMPORTANCE:
1205 case TIPC_HIGH_IMPORTANCE:
1206 case TIPC_CRITICAL_IMPORTANCE:
1207 case CONN_MANAGER:
1208 if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
1209 node->inputq = &link->inputq;
1210 node->action_flags |= TIPC_MSG_EVT;
1211 }
1212 return true;
1213 case NAME_DISTRIBUTOR:
1214 node->bclink.recv_permitted = true;
1215 node->namedq = &link->namedq;
1216 skb_queue_tail(&link->namedq, skb);
1217 if (skb_queue_len(&link->namedq) == 1)
1218 node->action_flags |= TIPC_NAMED_MSG_EVT;
1219 return true;
1220 case MSG_BUNDLER:
1221 case CHANGEOVER_PROTOCOL:
1222 case MSG_FRAGMENTER:
1223 case BCAST_PROTOCOL:
1224 return false;
1225 default:
1226 pr_warn("Dropping received illegal msg type\n");
1227 kfree_skb(skb);
1228 return false;
1229 }
1230 }
1231
1232 /* tipc_link_input - process packet that has passed link protocol check
1233 *
1234 * Consumes buffer
1235 * Node lock must be held
1236 */
1237 static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1238 {
1239 struct tipc_node *node = link->owner;
1240 struct tipc_msg *msg = buf_msg(skb);
1241 struct sk_buff *iskb;
1242 int pos = 0;
1243
1244 if (likely(tipc_data_input(link, skb)))
1245 return;
1246
1247 switch (msg_user(msg)) {
1248 case CHANGEOVER_PROTOCOL:
1249 if (!tipc_link_tunnel_rcv(node, &skb))
1250 break;
1251 if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1252 tipc_data_input(link, skb);
1253 break;
1254 }
1255 case MSG_BUNDLER:
1256 link->stats.recv_bundles++;
1257 link->stats.recv_bundled += msg_msgcnt(msg);
1258
1259 while (tipc_msg_extract(skb, &iskb, &pos))
1260 tipc_data_input(link, iskb);
1261 break;
1262 case MSG_FRAGMENTER:
1263 link->stats.recv_fragments++;
1264 if (tipc_buf_append(&link->reasm_buf, &skb)) {
1265 link->stats.recv_fragmented++;
1266 tipc_data_input(link, skb);
1267 } else if (!link->reasm_buf) {
1268 tipc_link_reset(link);
1269 }
1270 break;
1271 case BCAST_PROTOCOL:
1272 tipc_link_sync_rcv(node, skb);
1273 break;
1274 default:
1275 break;
1276 }
1277 }
1278
1279 /**
1280 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1281 *
1282 * Returns increase in queue length (i.e. 0 or 1)
1283 */
1284 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
1285 {
1286 struct sk_buff *skb1;
1287 u32 seq_no = buf_seqno(skb);
1288
1289 /* Empty queue? */
1290 if (skb_queue_empty(list)) {
1291 __skb_queue_tail(list, skb);
1292 return 1;
1293 }
1294
1295 /* Last? */
1296 if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1297 __skb_queue_tail(list, skb);
1298 return 1;
1299 }
1300
1301 /* Locate insertion point in queue, then insert; discard if duplicate */
1302 skb_queue_walk(list, skb1) {
1303 u32 curr_seqno = buf_seqno(skb1);
1304
1305 if (seq_no == curr_seqno) {
1306 kfree_skb(skb);
1307 return 0;
1308 }
1309
1310 if (less(seq_no, curr_seqno))
1311 break;
1312 }
1313
1314 __skb_queue_before(list, skb1, skb);
1315 return 1;
1316 }
1317
1318 /*
1319 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1320 */
1321 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1322 struct sk_buff *buf)
1323 {
1324 u32 seq_no = buf_seqno(buf);
1325
1326 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1327 tipc_link_proto_rcv(l_ptr, buf);
1328 return;
1329 }
1330
1331 /* Record OOS packet arrival (force mismatch on next timeout) */
1332 l_ptr->checkpoint--;
1333
1334 /*
1335 * Discard packet if a duplicate; otherwise add it to deferred queue
1336 * and notify peer of gap as per protocol specification
1337 */
1338 if (less(seq_no, mod(l_ptr->next_in_no))) {
1339 l_ptr->stats.duplicates++;
1340 kfree_skb(buf);
1341 return;
1342 }
1343
1344 if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
1345 l_ptr->stats.deferred_recv++;
1346 if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
1347 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1348 } else {
1349 l_ptr->stats.duplicates++;
1350 }
1351 }
1352
1353 /*
1354 * Send protocol message to the other endpoint.
1355 */
1356 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1357 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1358 {
1359 struct sk_buff *buf = NULL;
1360 struct tipc_msg *msg = l_ptr->pmsg;
1361 u32 msg_size = sizeof(l_ptr->proto_msg);
1362 int r_flag;
1363
1364 /* Don't send protocol message during link changeover */
1365 if (l_ptr->exp_msg_count)
1366 return;
1367
1368 /* Abort non-RESET send if communication with node is prohibited */
1369 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1370 return;
1371
1372 /* Create protocol message with "out-of-sequence" sequence number */
1373 msg_set_type(msg, msg_typ);
1374 msg_set_net_plane(msg, l_ptr->net_plane);
1375 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1376 msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
1377
1378 if (msg_typ == STATE_MSG) {
1379 u32 next_sent = mod(l_ptr->next_out_no);
1380
1381 if (!tipc_link_is_up(l_ptr))
1382 return;
1383 if (l_ptr->next_out)
1384 next_sent = buf_seqno(l_ptr->next_out);
1385 msg_set_next_sent(msg, next_sent);
1386 if (!skb_queue_empty(&l_ptr->deferred_queue)) {
1387 u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
1388 gap = mod(rec - mod(l_ptr->next_in_no));
1389 }
1390 msg_set_seq_gap(msg, gap);
1391 if (gap)
1392 l_ptr->stats.sent_nacks++;
1393 msg_set_link_tolerance(msg, tolerance);
1394 msg_set_linkprio(msg, priority);
1395 msg_set_max_pkt(msg, ack_mtu);
1396 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1397 msg_set_probe(msg, probe_msg != 0);
1398 if (probe_msg) {
1399 u32 mtu = l_ptr->max_pkt;
1400
1401 if ((mtu < l_ptr->max_pkt_target) &&
1402 link_working_working(l_ptr) &&
1403 l_ptr->fsm_msg_cnt) {
1404 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1405 if (l_ptr->max_pkt_probes == 10) {
1406 l_ptr->max_pkt_target = (msg_size - 4);
1407 l_ptr->max_pkt_probes = 0;
1408 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1409 }
1410 l_ptr->max_pkt_probes++;
1411 }
1412
1413 l_ptr->stats.sent_probes++;
1414 }
1415 l_ptr->stats.sent_states++;
1416 } else { /* RESET_MSG or ACTIVATE_MSG */
1417 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1418 msg_set_seq_gap(msg, 0);
1419 msg_set_next_sent(msg, 1);
1420 msg_set_probe(msg, 0);
1421 msg_set_link_tolerance(msg, l_ptr->tolerance);
1422 msg_set_linkprio(msg, l_ptr->priority);
1423 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1424 }
1425
1426 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1427 msg_set_redundant_link(msg, r_flag);
1428 msg_set_linkprio(msg, l_ptr->priority);
1429 msg_set_size(msg, msg_size);
1430
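	/* Offsetting the sequence number by half the sequence space
	 * guarantees a protocol message is never mistaken for the
	 * in-sequence traffic packet the peer expects next.
	 */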
1431 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1432
1433 buf = tipc_buf_acquire(msg_size);
1434 if (!buf)
1435 return;
1436
1437 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1438 buf->priority = TC_PRIO_CONTROL;
1439
1440 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
1441 &l_ptr->media_addr);
1442 l_ptr->unacked_window = 0;
1443 kfree_skb(buf);
1444 }
1445
1446 /*
1447 * Receive protocol message :
1448 * Note that network plane id propagates through the network, and may
1449 * change at any time. The node with lowest address rules
1450 */
1451 static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1452 struct sk_buff *buf)
1453 {
1454 u32 rec_gap = 0;
1455 u32 max_pkt_info;
1456 u32 max_pkt_ack;
1457 u32 msg_tol;
1458 struct tipc_msg *msg = buf_msg(buf);
1459
1460 /* Discard protocol message during link changeover */
1461 if (l_ptr->exp_msg_count)
1462 goto exit;
1463
1464 if (l_ptr->net_plane != msg_net_plane(msg))
1465 if (link_own_addr(l_ptr) > msg_prevnode(msg))
1466 l_ptr->net_plane = msg_net_plane(msg);
1467
1468 switch (msg_type(msg)) {
1469
1470 case RESET_MSG:
1471 if (!link_working_unknown(l_ptr) &&
1472 (l_ptr->peer_session != INVALID_SESSION)) {
1473 if (less_eq(msg_session(msg), l_ptr->peer_session))
1474 break; /* duplicate or old reset: ignore */
1475 }
1476
1477 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1478 link_working_unknown(l_ptr))) {
1479 /*
1480 * peer has lost contact -- don't allow peer's links
1481 * to reactivate before we recognize loss & clean up
1482 */
1483 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1484 }
1485
1486 link_state_event(l_ptr, RESET_MSG);
1487
1488 /* fall thru' */
1489 case ACTIVATE_MSG:
1490 /* Update link settings according to the other endpoint's values */
1491 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1492
1493 msg_tol = msg_link_tolerance(msg);
1494 if (msg_tol > l_ptr->tolerance)
1495 link_set_supervision_props(l_ptr, msg_tol);
1496
1497 if (msg_linkprio(msg) > l_ptr->priority)
1498 l_ptr->priority = msg_linkprio(msg);
1499
1500 max_pkt_info = msg_max_pkt(msg);
1501 if (max_pkt_info) {
1502 if (max_pkt_info < l_ptr->max_pkt_target)
1503 l_ptr->max_pkt_target = max_pkt_info;
1504 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1505 l_ptr->max_pkt = l_ptr->max_pkt_target;
1506 } else {
1507 l_ptr->max_pkt = l_ptr->max_pkt_target;
1508 }
1509
1510 /* Synchronize broadcast link info, if not done previously */
1511 if (!tipc_node_is_up(l_ptr->owner)) {
1512 l_ptr->owner->bclink.last_sent =
1513 l_ptr->owner->bclink.last_in =
1514 msg_last_bcast(msg);
1515 l_ptr->owner->bclink.oos_state = 0;
1516 }
1517
1518 l_ptr->peer_session = msg_session(msg);
1519 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1520
1521 if (msg_type(msg) == ACTIVATE_MSG)
1522 link_state_event(l_ptr, ACTIVATE_MSG);
1523 break;
1524 case STATE_MSG:
1525
1526 msg_tol = msg_link_tolerance(msg);
1527 if (msg_tol)
1528 link_set_supervision_props(l_ptr, msg_tol);
1529
1530 if (msg_linkprio(msg) &&
1531 (msg_linkprio(msg) != l_ptr->priority)) {
1532 pr_debug("%s<%s>, priority change %u->%u\n",
1533 link_rst_msg, l_ptr->name,
1534 l_ptr->priority, msg_linkprio(msg));
1535 l_ptr->priority = msg_linkprio(msg);
1536 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1537 break;
1538 }
1539
1540 /* Record reception; force mismatch at next timeout: */
1541 l_ptr->checkpoint--;
1542
1543 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1544 l_ptr->stats.recv_states++;
1545 if (link_reset_unknown(l_ptr))
1546 break;
1547
1548 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1549 rec_gap = mod(msg_next_sent(msg) -
1550 mod(l_ptr->next_in_no));
1551 }
1552
1553 max_pkt_ack = msg_max_pkt(msg);
1554 if (max_pkt_ack > l_ptr->max_pkt) {
1555 l_ptr->max_pkt = max_pkt_ack;
1556 l_ptr->max_pkt_probes = 0;
1557 }
1558
1559 max_pkt_ack = 0;
1560 if (msg_probe(msg)) {
1561 l_ptr->stats.recv_probes++;
1562 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1563 max_pkt_ack = msg_size(msg);
1564 }
1565
1566 /* Protocol message before retransmits, reduce loss risk */
1567 if (l_ptr->owner->bclink.recv_permitted)
1568 tipc_bclink_update_link_state(l_ptr->owner,
1569 msg_last_bcast(msg));
1570
1571 if (rec_gap || (msg_probe(msg))) {
1572 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
1573 0, max_pkt_ack);
1574 }
1575 if (msg_seq_gap(msg)) {
1576 l_ptr->stats.recv_nacks++;
1577 tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
1578 msg_seq_gap(msg));
1579 }
1580 break;
1581 }
1582 exit:
1583 kfree_skb(buf);
1584 }
1585
1586
1587 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1588 * a different bearer. Owner node is locked.
1589 */
1590 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1591 struct tipc_msg *tunnel_hdr,
1592 struct tipc_msg *msg,
1593 u32 selector)
1594 {
1595 struct tipc_link *tunnel;
1596 struct sk_buff *skb;
1597 u32 length = msg_size(msg);
1598
1599 tunnel = l_ptr->owner->active_links[selector & 1];
1600 if (!tipc_link_is_up(tunnel)) {
1601 pr_warn("%stunnel link no longer available\n", link_co_err);
1602 return;
1603 }
1604 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1605 skb = tipc_buf_acquire(length + INT_H_SIZE);
1606 if (!skb) {
1607 pr_warn("%sunable to send tunnel msg\n", link_co_err);
1608 return;
1609 }
1610 skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
1611 skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
1612 __tipc_link_xmit_skb(tunnel, skb);
1613 }
1614
1615
1616 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1617 * link is still active. We can do failover. Tunnel the failing link's
1618 * whole send queue via the remaining link. This way, we don't lose
1619 * any packets, and sequence order is preserved for subsequent traffic
1620 * sent over the remaining link. Owner node is locked.
1621 */
1622 void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1623 {
1624 u32 msgcount = skb_queue_len(&l_ptr->outqueue);
1625 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1626 struct tipc_msg tunnel_hdr;
1627 struct sk_buff *skb;
1628 int split_bundles;
1629
1630 if (!tunnel)
1631 return;
1632
1633 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1634 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
1635 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1636 msg_set_msgcnt(&tunnel_hdr, msgcount);
1637
1638 if (skb_queue_empty(&l_ptr->outqueue)) {
1639 skb = tipc_buf_acquire(INT_H_SIZE);
1640 if (skb) {
1641 skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
1642 msg_set_size(&tunnel_hdr, INT_H_SIZE);
1643 __tipc_link_xmit_skb(tunnel, skb);
1644 } else {
1645 pr_warn("%sunable to send changeover msg\n",
1646 link_co_err);
1647 }
1648 return;
1649 }
1650
1651 split_bundles = (l_ptr->owner->active_links[0] !=
1652 l_ptr->owner->active_links[1]);
1653
1654 skb_queue_walk(&l_ptr->outqueue, skb) {
1655 struct tipc_msg *msg = buf_msg(skb);
1656
1657 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
1658 struct tipc_msg *m = msg_get_wrapped(msg);
1659 unchar *pos = (unchar *)m;
1660
1661 msgcount = msg_msgcnt(msg);
1662 while (msgcount--) {
1663 msg_set_seqno(m, msg_seqno(msg));
1664 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
1665 msg_link_selector(m));
1666 pos += align(msg_size(m));
1667 m = (struct tipc_msg *)pos;
1668 }
1669 } else {
1670 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
1671 msg_link_selector(msg));
1672 }
1673 }
1674 }
1675
1676 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1677 * duplicate of the first link's send queue via the new link. This way, we
1678 * are guaranteed that currently queued packets from a socket are delivered
1679 * before future traffic from the same socket, even if this is using the
1680 * new link. The last arriving copy of each duplicate packet is dropped at
1681 * the receiving end by the regular protocol check, so packet cardinality
1682 * and sequence order is preserved per sender/receiver socket pair.
1683 * Owner node is locked.
1684 */
1685 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
1686 struct tipc_link *tunnel)
1687 {
1688 struct sk_buff *skb;
1689 struct tipc_msg tunnel_hdr;
1690
1691 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1692 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
1693 msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
1694 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1695 skb_queue_walk(&l_ptr->outqueue, skb) {
1696 struct sk_buff *outskb;
1697 struct tipc_msg *msg = buf_msg(skb);
1698 u32 length = msg_size(msg);
1699
1700 if (msg_user(msg) == MSG_BUNDLER)
1701 msg_set_type(msg, CLOSED_MSG);
1702 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
1703 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1704 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
1705 outskb = tipc_buf_acquire(length + INT_H_SIZE);
1706 if (outskb == NULL) {
1707 pr_warn("%sunable to send duplicate msg\n",
1708 link_co_err);
1709 return;
1710 }
1711 skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
1712 skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
1713 length);
1714 __tipc_link_xmit_skb(tunnel, outskb);
1715 if (!tipc_link_is_up(l_ptr))
1716 return;
1717 }
1718 }
1719
1720 /**
1721 * buf_extract - extracts embedded TIPC message from another message
1722 * @skb: encapsulating message buffer
1723 * @from_pos: offset to extract from
1724 *
1725 * Returns a new message buffer containing an embedded message. The
1726 * encapsulating buffer is left unchanged.
1727 */
1728 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
1729 {
1730 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
1731 u32 size = msg_size(msg);
1732 struct sk_buff *eb;
1733
1734 eb = tipc_buf_acquire(size);
1735 if (eb)
1736 skb_copy_to_linear_data(eb, msg, size);
1737 return eb;
1738 }
1739
1740 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1741 * Owner node is locked.
1742 */
1743 static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
1744 struct sk_buff *t_buf)
1745 {
1746 struct sk_buff *buf;
1747
1748 if (!tipc_link_is_up(l_ptr))
1749 return;
1750
1751 buf = buf_extract(t_buf, INT_H_SIZE);
1752 if (buf == NULL) {
1753 pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
1754 return;
1755 }
1756
1757 /* Add buffer to deferred queue, if applicable: */
1758 link_handle_out_of_seq_msg(l_ptr, buf);
1759 }
1760
1761 /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
1762 * Owner node is locked.
1763 */
1764 static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
1765 struct sk_buff *t_buf)
1766 {
1767 struct tipc_msg *t_msg = buf_msg(t_buf);
1768 struct sk_buff *buf = NULL;
1769 struct tipc_msg *msg;
1770
1771 if (tipc_link_is_up(l_ptr))
1772 tipc_link_reset(l_ptr);
1773
1774 /* First failover packet? */
1775 if (l_ptr->exp_msg_count == START_CHANGEOVER)
1776 l_ptr->exp_msg_count = msg_msgcnt(t_msg);
1777
1778 /* Should there be an inner packet? */
1779 if (l_ptr->exp_msg_count) {
1780 l_ptr->exp_msg_count--;
1781 buf = buf_extract(t_buf, INT_H_SIZE);
1782 if (buf == NULL) {
1783 pr_warn("%sno inner failover pkt\n", link_co_err);
1784 goto exit;
1785 }
1786 msg = buf_msg(buf);
1787
1788 if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
1789 kfree_skb(buf);
1790 buf = NULL;
1791 goto exit;
1792 }
1793 if (msg_user(msg) == MSG_FRAGMENTER) {
1794 l_ptr->stats.recv_fragments++;
1795 tipc_buf_append(&l_ptr->reasm_buf, &buf);
1796 }
1797 }
1798 exit:
1799 if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
1800 tipc_link_delete(l_ptr);
1801 return buf;
1802 }
1803
1804 /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
1805 * via other link as result of a failover (ORIGINAL_MSG) or
1806 * a new active link (DUPLICATE_MSG). Failover packets are
1807 * returned to the active link for delivery upwards.
1808 * Owner node is locked.
1809 */
1810 static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
1811 struct sk_buff **buf)
1812 {
1813 struct sk_buff *t_buf = *buf;
1814 struct tipc_link *l_ptr;
1815 struct tipc_msg *t_msg = buf_msg(t_buf);
1816 u32 bearer_id = msg_bearer_id(t_msg);
1817
1818 *buf = NULL;
1819
1820 if (bearer_id >= MAX_BEARERS)
1821 goto exit;
1822
1823 l_ptr = n_ptr->links[bearer_id];
1824 if (!l_ptr)
1825 goto exit;
1826
1827 if (msg_type(t_msg) == DUPLICATE_MSG)
1828 tipc_link_dup_rcv(l_ptr, t_buf);
1829 else if (msg_type(t_msg) == ORIGINAL_MSG)
1830 *buf = tipc_link_failover_rcv(l_ptr, t_buf);
1831 else
1832 pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1833 exit:
1834 kfree_skb(t_buf);
1835 return *buf != NULL;
1836 }
1837
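/* Derive the continuity timer interval and abort limit from the link
 * tolerance: intv = min(tol / 4, 500) ms. Worked example (integer
 * arithmetic, ignoring jiffies rounding): tol = 1500 ms gives
 * intv = 375 ms and abort_limit = 1500 / (375 / 4) = 16 unanswered
 * probes before the link is declared failed.
 */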
1838 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
1839 {
1840 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
1841
1842 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1843 return;
1844
1845 l_ptr->tolerance = tol;
1846 l_ptr->cont_intv = msecs_to_jiffies(intv);
1847 l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
1848 }
1849
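/* Derive per-importance send queue limits from the configured link
 * window: data from this node may queue roughly window, 4/3, 5/3 and
 * 2 times the window (LOW up to CRITICAL importance) before senders
 * are pushed back with -ELINKCONG; transit and internal users get
 * fixed limits.
 */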
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, including FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}

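	/* e.g. window = 300 yields limits of 300/400/500/600 packets as
	 * importance rises, so higher importance levels tolerate a longer
	 * backlog before the link reports congestion.
	 */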
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @link_name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}

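	/* The two-letter suffix encodes the link FSM state: Working-Unknown,
	 * Reset-Reset, Reset-Unknown or Working-Working.
	 */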
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	unsigned int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

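			/* A STATE_MSG carrying the changed value advertises
			 * the new tolerance (and, below, the new priority)
			 * to the peer endpoint.
			 */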
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

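	/* Table-driven encoding: each entry pairs a netlink attribute id
	 * with the counter value to emit, so a single loop below can fill
	 * the whole nested TIPC_NLA_LINK_STATS block.
	 */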
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->queue_limit[TIPC_LOW_IMPORTANCE]))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(net, msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

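	/* Resume from the link index where a previous, interrupted dump
	 * pass stopped; reset it to 0 once all links have been added.
	 */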
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent(),
			 * which means that setting prev_seq here will cause
			 * the consistency check to fail in the netlink
			 * callback handler, resulting in the last NLMSG_DONE
			 * message having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

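	/* cb->args[] persists across dump invocations: args[0] is the last
	 * node visited, args[1] the last link index, args[2] a done flag.
	 */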
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	unsigned int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(net, &msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0)
		return tipc_bclink_reset_stats(net);

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}