4738cb1bf7c0cd130104e1eb3021b2d35483bc9f
[deliverable/linux.git] / net / tipc / link.c
1 /*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "link.h"
39 #include "bcast.h"
40 #include "socket.h"
41 #include "name_distr.h"
42 #include "discover.h"
43 #include "config.h"
44 #include "netlink.h"
45
46 #include <linux/pkt_sched.h>
47
/*
 * Error message prefixes, shared by the pr_warn()/pr_err()/pr_info()
 * calls throughout this file
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
54
/* Netlink attribute policy for link-level attributes (TIPC_NLA_LINK_*) */
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
};
70
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
};
78
/*
 * Out-of-range value for link session numbers (valid sessions are 16 bit)
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define STARTING_EVT 856384768 /* link processing trigger */
#define TRAFFIC_MSG_EVT 560815u /* link traffic received (see link_state_event) */
#define TIMEOUT_EVT 560817u /* link timer expired */

/*
 * The following two 'message types' is really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define OPEN_MSG 0
#define CLOSED_MSG 1

/*
 * State value stored in 'exp_msg_count' to mark a changeover in progress
 */
#define START_CHANGEOVER 100000u
103
/* Forward declarations of functions local to this file */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
117
/*
 * Simple link routines
 */

/* align - round @i up to the next multiple of 4 */
static unsigned int align(unsigned int i)
{
	unsigned int quads = (i + 3u) >> 2;

	return quads << 2;
}
125
/**
 * link_init_max_pkt - (re)initialize link MTU discovery state
 * @l_ptr: pointer to link
 *
 * Derives the target packet size from the underlying bearer's MTU,
 * rounded down to a 4-byte boundary and capped at MAX_MSG_SIZE, and
 * starts the working MTU at no more than MAX_PKT_DEFAULT. Silently
 * returns if the bearer has disappeared.
 */
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);	/* round down to 4-byte multiple */
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	/* Restart MTU probing from scratch */
	l_ptr->max_pkt_probes = 0;
}
151
152 static u32 link_next_sent(struct tipc_link *l_ptr)
153 {
154 if (l_ptr->next_out)
155 return buf_seqno(l_ptr->next_out);
156 return mod(l_ptr->next_out_no);
157 }
158
159 static u32 link_last_sent(struct tipc_link *l_ptr)
160 {
161 return mod(link_next_sent(l_ptr) - 1);
162 }
163
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */

/* tipc_link_is_up - return non-zero if the link is in a working state */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	return l_ptr && (link_working_working(l_ptr) ||
			 link_working_unknown(l_ptr));
}
173
174 int tipc_link_is_active(struct tipc_link *l_ptr)
175 {
176 return (l_ptr->owner->active_links[0] == l_ptr) ||
177 (l_ptr->owner->active_links[1] == l_ptr);
178 }
179
/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * Updates send-traffic profiling statistics, feeds a TIMEOUT_EVT into
 * the link state machine, and pushes any deferred outbound packets.
 * Takes and releases the owner node's lock.
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		/* For a first fragment, profile the original message size,
		 * not the fragment's
		 */
		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}
229
/* link_set_timer - (re)arm the link supervision timer for @time ticks */
static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}
234
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link, or NULL if the per-node link limit is
 * reached, a link already exists on this bearer, or allocation fails.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	/* NOTE(review): assumes b_ptr->name always contains ':' —
	 * strchr() result is used unchecked; confirm bearer name
	 * validation guarantees this.
	 */
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* Pre-build the template for link protocol messages */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->waiting_sks);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	/* Start link supervision; the timer callback runs link_timeout() */
	k_init_timer(&l_ptr->timer, (Handler)link_timeout,
		     (unsigned long)l_ptr);

	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}
317
/**
 * tipc_link_delete_list - reset, and possibly delete, all links on a bearer
 * @bearer_id: id of the bearer whose links are affected
 * @shutting_down: if true, delete links even on nodes that are still up
 *
 * Walks the node list under RCU. Each matching link is reset; it is
 * freed immediately when no failover can follow, otherwise it is only
 * flagged LINK_STOPPED and deleted when failover finishes.
 */
void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr) {
			tipc_link_reset(l_ptr);
			if (shutting_down || !tipc_node_is_up(n_ptr)) {
				tipc_node_detach_link(l_ptr->owner, l_ptr);
				tipc_link_reset_fragments(l_ptr);
				tipc_node_unlock(n_ptr);

				/* Nobody else can access this link now: */
				del_timer_sync(&l_ptr->timer);
				kfree(l_ptr);
			} else {
				/* Detach/delete when failover is finished: */
				l_ptr->flags |= LINK_STOPPED;
				tipc_node_unlock(n_ptr);
				del_timer_sync(&l_ptr->timer);
			}
			continue;
		}
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}
349
/**
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain that was attempted sent
 * @imp: importance of message attempted sent
 *
 * Create pseudo msg to send back to user when congestion abates.
 * Returns true if the wakeup message was queued, false on allocation
 * failure.
 */
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
{
	struct sk_buff *buf;

	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
			      tipc_own_addr, oport, 0, 0);
	if (!buf)
		return false;
	/* Remember what was attempted, so the wakeup can be deferred until
	 * there is room for a chain of this size/importance
	 */
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
	__skb_queue_tail(&link->waiting_sks, buf);
	link->stats.link_congs++;
	return true;
}
373
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
static void link_prepare_wakeup(struct tipc_link *link)
{
	struct sk_buff_head *wq = &link->waiting_sks;
	struct sk_buff *buf;
	uint pend_qsz = link->out_queue_size;

	for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) {
		/* Stop once queue size plus moved chains would exceed the
		 * limit for this message's importance level
		 */
		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp])
			break;
		pend_qsz += TIPC_SKB_CB(buf)->chain_sz;
		__skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq));
	}
}
393
/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 *
 * Frees the whole first_out chain and resets the queue accounting.
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->first_out);
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}
404
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 *
 * Drops any partially reassembled inbound message.
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}
414
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 *
 * NOTE(review): oldest_deferred_in and first_out are freed but not set
 * to NULL here — presumably callers reset the queue pointers themselves
 * (cf. tipc_link_reset()); confirm before reusing the link.
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	kfree_skb_list(l_ptr->oldest_deferred_in);
	kfree_skb_list(l_ptr->first_out);
	tipc_link_reset_fragments(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}
427
/**
 * tipc_link_reset - reset a link to RESET_UNKNOWN and purge its queues
 * @l_ptr: pointer to link
 *
 * Bumps the protocol session number, invalidates the peer session,
 * restarts MTU negotiation, and — unless the link was already in a
 * reset state — tears down node/bearer bindings, prepares changeover
 * if another link is still active, and reinitializes all queue and
 * sequence-number state.
 */
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	/* New session; 16-bit wrap-around */
	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	/* Already reset — nothing more to tear down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);

	/* If a parallel link stays up, set up for failover/changeover */
	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	kfree_skb_list(l_ptr->oldest_deferred_in);
	/* Hand any congestion-blocked senders to the node for wakeup */
	if (!skb_queue_empty(&l_ptr->waiting_sks)) {
		skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
		owner->action_flags |= TIPC_WAKEUP_USERS;
	}
	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
480
/**
 * tipc_link_reset_list - reset the link on @bearer_id for every known node
 * @bearer_id: id of the bearer whose links are to be reset
 *
 * Walks the node list under RCU, taking each node's lock around the reset.
 */
void tipc_link_reset_list(unsigned int bearer_id)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}
496
/* link_activate - mark a link as up: restart inbound sequence counting
 * and register it with the owner node and the bearer's destination set
 */
static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
}
503
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 *
 * States: WORKING_WORKING (up, traffic seen), WORKING_UNKNOWN (up,
 * probing the peer), RESET_UNKNOWN (down, peer state unknown) and
 * RESET_RESET (down, peer known to be reset). Events are received
 * traffic, peer RESET/ACTIVATE protocol messages, timer expiry, and
 * the one-shot STARTING_EVT. Called with the owner node locked.
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		/* Only keep the timer alive while failover is in progress */
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			/* Traffic arrived since last check: stay WW, maybe
			 * ask for missing bcast acks or probe a larger MTU
			 */
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			/* No traffic: start probing the peer at 4x rate */
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* Peer is alive after all */
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* Traffic resumed: back to WW */
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				/* Keep probing until abort_limit is reached */
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			/* Don't come up while the active link is still probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			/* First working link: sync broadcast state with peer */
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* Don't come up while the active link is still probing */
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
689
690 /* tipc_link_cong: determine return value and how to treat the
691 * sent buffer during link congestion.
692 * - For plain, errorless user data messages we keep the buffer and
693 * return -ELINKONG.
694 * - For all other messages we discard the buffer and return -EHOSTUNREACH
695 * - For TIPC internal messages we also reset the link
696 */
697 static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
698 {
699 struct tipc_msg *msg = buf_msg(buf);
700 uint imp = tipc_msg_tot_importance(msg);
701 u32 oport = msg_tot_origport(msg);
702
703 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
704 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
705 tipc_link_reset(link);
706 goto drop;
707 }
708 if (unlikely(msg_errcode(msg)))
709 goto drop;
710 if (unlikely(msg_reroute_cnt(msg)))
711 goto drop;
712 if (TIPC_SKB_CB(buf)->wakeup_pending)
713 return -ELINKCONG;
714 if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp))
715 return -ELINKCONG;
716 drop:
717 kfree_skb_list(buf);
718 return -EHOSTUNREACH;
719 }
720
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @buf: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint psz = msg_size(msg);
	uint qsz = link->out_queue_size;
	uint sndlim = link->queue_limit[0];	/* send window */
	uint imp = tipc_msg_tot_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff *next = buf->next;

	/* Match queue limits against msg importance: */
	if (unlikely(qsz >= link->queue_limit[imp]))
		return tipc_link_cong(link, buf);

	/* Has valid packet limit been used ? */
	if (unlikely(psz > mtu)) {
		kfree_skb_list(buf);
		return -EMSGSIZE;
	}

	/* Prepare each packet for sending, and add to outqueue: */
	while (buf) {
		next = buf->next;
		msg = buf_msg(buf);
		/* Word 2 carries the ack number and this packet's seqno */
		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
		msg_set_bcast_ack(msg, bc_last_in);

		if (!link->first_out) {
			link->first_out = buf;
		} else if (qsz < sndlim) {
			/* Inside the send window: append as-is */
			link->last_out->next = buf;
		} else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
			/* Window full: piggyback onto the last queued packet */
			link->stats.sent_bundled++;
			buf = next;
			next = buf->next;
			continue;
		} else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
			/* Could not piggyback: start a new bundle packet */
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			link->last_out->next = buf;
			if (!link->next_out)
				link->next_out = buf;
		} else {
			link->last_out->next = buf;
			if (!link->next_out)
				link->next_out = buf;
		}

		/* Send packet if possible: */
		if (likely(++qsz <= sndlim)) {
			tipc_bearer_send(link->bearer_id, buf, addr);
			link->next_out = next;
			link->unacked_window = 0;
		}
		seqno++;
		link->last_out = buf;
		buf = next;
	}
	link->next_out_no = seqno;
	link->out_queue_size = qsz;
	return 0;
}
797
/**
 * tipc_link_xmit() is the general link level function for message sending
 * @buf: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(link, buf);
		tipc_node_unlock(node);
	}

	/* A link consumed (or kept) the chain — nothing more to do */
	if (link)
		return rc;

	/* No link found: deliver locally if the destination is this node */
	if (likely(in_own_node(dnode)))
		return tipc_sk_rcv(buf);

	kfree_skb_list(buf);
	return rc;
}
831
/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)	/* best effort: silently skip on allocation failure */
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit(link, buf);
}
854
/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked. Consumes the buffer.
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
871
/*
 * tipc_link_push_packet: Push one unsent packet to the media
 *
 * Tries, in order: one retransmission, the deferred protocol message,
 * then one deferred data message (if the send window permits).
 * Returns 0 if a packet was pushed, 1 if there was nothing to push.
 */
static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->bearer_id, buf,
					 &l_ptr->media_addr);
			/* No more piggybacking once the bundle is on the wire */
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, BUNDLE_CLOSED);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}
939
/*
 * tipc_link_push_queue(): push out the unsent messages of a link where
 * congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	/* Keep pushing until tipc_link_push_packet() reports nothing left */
	while (!tipc_link_push_packet(l_ptr))
		;
}
952
/**
 * tipc_link_reset_all - reset every link attached to a node
 * @node: node whose links are to be reset
 *
 * Takes the node lock for the duration of the operation.
 */
void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}
972
/**
 * link_retransmit_failure - handle repeated retransmission failure
 * @l_ptr: failing link (addr == 0 means the broadcast link)
 * @buf: first buffer of the failing retransmission
 *
 * A unicast link is simply reset; for the broadcast link, diagnostic
 * state of the slowest peer is dumped and a broadcast reset is flagged.
 */
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);

	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		/* Dump the state of the node furthest behind on acks */
		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		tipc_bclink_set_flags(TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}
1013
/**
 * tipc_link_retransmit - retransmit up to @retransmits packets
 * @l_ptr: link to retransmit on
 * @buf: first buffer to retransmit (may be NULL)
 * @retransmits: maximum number of packets to resend
 *
 * Retransmits stop at the first never-sent packet (next_out). If the
 * same head-of-queue packet is retransmitted more than 100 times the
 * link is declared failed via link_retransmit_failure().
 */
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, buf);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	/* Everything requested has been (re)sent; clear the retransmit queue */
	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
1047
1048 /**
1049 * link_insert_deferred_queue - insert deferred messages back into receive chain
1050 */
1051 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1052 struct sk_buff *buf)
1053 {
1054 u32 seq_no;
1055
1056 if (l_ptr->oldest_deferred_in == NULL)
1057 return buf;
1058
1059 seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1060 if (seq_no == mod(l_ptr->next_in_no)) {
1061 l_ptr->newest_deferred_in->next = buf;
1062 buf = l_ptr->oldest_deferred_in;
1063 l_ptr->oldest_deferred_in = NULL;
1064 l_ptr->deferred_inqueue_sz = 0;
1065 }
1066 return buf;
1067 }
1068
1069 /**
1070 * link_recv_buf_validate - validate basic format of received message
1071 *
1072 * This routine ensures a TIPC message has an acceptable header, and at least
1073 * as much data as the header indicates it should. The routine also ensures
1074 * that the entire message header is stored in the main fragment of the message
1075 * buffer, to simplify future access to message header fields.
1076 *
1077 * Note: Having extra info present in the message header or data areas is OK.
1078 * TIPC will ignore the excess, under the assumption that it is optional info
1079 * introduced by a later release of the protocol.
1080 */
1081 static int link_recv_buf_validate(struct sk_buff *buf)
1082 {
1083 static u32 min_data_hdr_size[8] = {
1084 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1085 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1086 };
1087
1088 struct tipc_msg *msg;
1089 u32 tipc_hdr[2];
1090 u32 size;
1091 u32 hdr_size;
1092 u32 min_hdr_size;
1093
1094 /* If this packet comes from the defer queue, the skb has already
1095 * been validated
1096 */
1097 if (unlikely(TIPC_SKB_CB(buf)->deferred))
1098 return 1;
1099
1100 if (unlikely(buf->len < MIN_H_SIZE))
1101 return 0;
1102
1103 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1104 if (msg == NULL)
1105 return 0;
1106
1107 if (unlikely(msg_version(msg) != TIPC_VERSION))
1108 return 0;
1109
1110 size = msg_size(msg);
1111 hdr_size = msg_hdr_sz(msg);
1112 min_hdr_size = msg_isdata(msg) ?
1113 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1114
1115 if (unlikely((hdr_size < min_hdr_size) ||
1116 (size < hdr_size) ||
1117 (buf->len < size) ||
1118 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1119 return 0;
1120
1121 return pskb_may_pull(buf, hdr_size);
1122 }
1123
1124 /**
1125 * tipc_rcv - process TIPC packets/messages arriving from off-node
1126 * @head: pointer to message buffer chain
1127 * @b_ptr: pointer to bearer message arrived on
1128 *
1129 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1130 * structure (i.e. cannot be NULL), but bearer can be inactive.
1131 */
1132 void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1133 {
1134 while (head) {
1135 struct tipc_node *n_ptr;
1136 struct tipc_link *l_ptr;
1137 struct sk_buff *crs;
1138 struct sk_buff *buf = head;
1139 struct tipc_msg *msg;
1140 u32 seq_no;
1141 u32 ackd;
1142 u32 released = 0;
1143
1144 head = head->next;
1145 buf->next = NULL;
1146
1147 /* Ensure message is well-formed */
1148 if (unlikely(!link_recv_buf_validate(buf)))
1149 goto discard;
1150
1151 /* Ensure message data is a single contiguous unit */
1152 if (unlikely(skb_linearize(buf)))
1153 goto discard;
1154
1155 /* Handle arrival of a non-unicast link message */
1156 msg = buf_msg(buf);
1157
1158 if (unlikely(msg_non_seq(msg))) {
1159 if (msg_user(msg) == LINK_CONFIG)
1160 tipc_disc_rcv(buf, b_ptr);
1161 else
1162 tipc_bclink_rcv(buf);
1163 continue;
1164 }
1165
1166 /* Discard unicast link messages destined for another node */
1167 if (unlikely(!msg_short(msg) &&
1168 (msg_destnode(msg) != tipc_own_addr)))
1169 goto discard;
1170
1171 /* Locate neighboring node that sent message */
1172 n_ptr = tipc_node_find(msg_prevnode(msg));
1173 if (unlikely(!n_ptr))
1174 goto discard;
1175 tipc_node_lock(n_ptr);
1176
1177 /* Locate unicast link endpoint that should handle message */
1178 l_ptr = n_ptr->links[b_ptr->identity];
1179 if (unlikely(!l_ptr))
1180 goto unlock_discard;
1181
1182 /* Verify that communication with node is currently allowed */
1183 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1184 msg_user(msg) == LINK_PROTOCOL &&
1185 (msg_type(msg) == RESET_MSG ||
1186 msg_type(msg) == ACTIVATE_MSG) &&
1187 !msg_redundant_link(msg))
1188 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1189
1190 if (tipc_node_blocked(n_ptr))
1191 goto unlock_discard;
1192
1193 /* Validate message sequence number info */
1194 seq_no = msg_seqno(msg);
1195 ackd = msg_ack(msg);
1196
1197 /* Release acked messages */
1198 if (n_ptr->bclink.recv_permitted)
1199 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1200
1201 crs = l_ptr->first_out;
1202 while ((crs != l_ptr->next_out) &&
1203 less_eq(buf_seqno(crs), ackd)) {
1204 struct sk_buff *next = crs->next;
1205 kfree_skb(crs);
1206 crs = next;
1207 released++;
1208 }
1209 if (released) {
1210 l_ptr->first_out = crs;
1211 l_ptr->out_queue_size -= released;
1212 }
1213
1214 /* Try sending any messages link endpoint has pending */
1215 if (unlikely(l_ptr->next_out))
1216 tipc_link_push_queue(l_ptr);
1217
1218 if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
1219 link_prepare_wakeup(l_ptr);
1220 l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
1221 }
1222
1223 /* Process the incoming packet */
1224 if (unlikely(!link_working_working(l_ptr))) {
1225 if (msg_user(msg) == LINK_PROTOCOL) {
1226 tipc_link_proto_rcv(l_ptr, buf);
1227 head = link_insert_deferred_queue(l_ptr, head);
1228 tipc_node_unlock(n_ptr);
1229 continue;
1230 }
1231
1232 /* Traffic message. Conditionally activate link */
1233 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1234
1235 if (link_working_working(l_ptr)) {
1236 /* Re-insert buffer in front of queue */
1237 buf->next = head;
1238 head = buf;
1239 tipc_node_unlock(n_ptr);
1240 continue;
1241 }
1242 goto unlock_discard;
1243 }
1244
1245 /* Link is now in state WORKING_WORKING */
1246 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1247 link_handle_out_of_seq_msg(l_ptr, buf);
1248 head = link_insert_deferred_queue(l_ptr, head);
1249 tipc_node_unlock(n_ptr);
1250 continue;
1251 }
1252 l_ptr->next_in_no++;
1253 if (unlikely(l_ptr->oldest_deferred_in))
1254 head = link_insert_deferred_queue(l_ptr, head);
1255
1256 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1257 l_ptr->stats.sent_acks++;
1258 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1259 }
1260
1261 if (tipc_link_prepare_input(l_ptr, &buf)) {
1262 tipc_node_unlock(n_ptr);
1263 continue;
1264 }
1265 tipc_node_unlock(n_ptr);
1266 msg = buf_msg(buf);
1267 if (tipc_link_input(l_ptr, buf) != 0)
1268 goto discard;
1269 continue;
1270 unlock_discard:
1271 tipc_node_unlock(n_ptr);
1272 discard:
1273 kfree_skb(buf);
1274 }
1275 }
1276
1277 /**
1278 * tipc_link_prepare_input - process TIPC link messages
1279 *
1280 * returns nonzero if the message was consumed
1281 *
1282 * Node lock must be held
1283 */
1284 static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
1285 {
1286 struct tipc_node *n;
1287 struct tipc_msg *msg;
1288 int res = -EINVAL;
1289
1290 n = l->owner;
1291 msg = buf_msg(*buf);
1292 switch (msg_user(msg)) {
1293 case CHANGEOVER_PROTOCOL:
1294 if (tipc_link_tunnel_rcv(n, buf))
1295 res = 0;
1296 break;
1297 case MSG_FRAGMENTER:
1298 l->stats.recv_fragments++;
1299 if (tipc_buf_append(&l->reasm_buf, buf)) {
1300 l->stats.recv_fragmented++;
1301 res = 0;
1302 } else if (!l->reasm_buf) {
1303 tipc_link_reset(l);
1304 }
1305 break;
1306 case MSG_BUNDLER:
1307 l->stats.recv_bundles++;
1308 l->stats.recv_bundled += msg_msgcnt(msg);
1309 res = 0;
1310 break;
1311 case NAME_DISTRIBUTOR:
1312 n->bclink.recv_permitted = true;
1313 res = 0;
1314 break;
1315 case BCAST_PROTOCOL:
1316 tipc_link_sync_rcv(n, *buf);
1317 break;
1318 default:
1319 res = 0;
1320 }
1321 return res;
1322 }
1323 /**
1324 * tipc_link_input - Deliver message too higher layers
1325 */
1326 static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
1327 {
1328 struct tipc_msg *msg = buf_msg(buf);
1329 int res = 0;
1330
1331 switch (msg_user(msg)) {
1332 case TIPC_LOW_IMPORTANCE:
1333 case TIPC_MEDIUM_IMPORTANCE:
1334 case TIPC_HIGH_IMPORTANCE:
1335 case TIPC_CRITICAL_IMPORTANCE:
1336 case CONN_MANAGER:
1337 tipc_sk_rcv(buf);
1338 break;
1339 case NAME_DISTRIBUTOR:
1340 tipc_named_rcv(buf);
1341 break;
1342 case MSG_BUNDLER:
1343 tipc_link_bundle_rcv(buf);
1344 break;
1345 default:
1346 res = -EINVAL;
1347 }
1348 return res;
1349 }
1350
1351 /**
1352 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1353 *
1354 * Returns increase in queue length (i.e. 0 or 1)
1355 */
1356 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1357 struct sk_buff *buf)
1358 {
1359 struct sk_buff *queue_buf;
1360 struct sk_buff **prev;
1361 u32 seq_no = buf_seqno(buf);
1362
1363 buf->next = NULL;
1364
1365 /* Empty queue ? */
1366 if (*head == NULL) {
1367 *head = *tail = buf;
1368 return 1;
1369 }
1370
1371 /* Last ? */
1372 if (less(buf_seqno(*tail), seq_no)) {
1373 (*tail)->next = buf;
1374 *tail = buf;
1375 return 1;
1376 }
1377
1378 /* Locate insertion point in queue, then insert; discard if duplicate */
1379 prev = head;
1380 queue_buf = *head;
1381 for (;;) {
1382 u32 curr_seqno = buf_seqno(queue_buf);
1383
1384 if (seq_no == curr_seqno) {
1385 kfree_skb(buf);
1386 return 0;
1387 }
1388
1389 if (less(seq_no, curr_seqno))
1390 break;
1391
1392 prev = &queue_buf->next;
1393 queue_buf = queue_buf->next;
1394 }
1395
1396 buf->next = queue_buf;
1397 *prev = buf;
1398 return 1;
1399 }
1400
1401 /*
1402 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1403 */
1404 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1405 struct sk_buff *buf)
1406 {
1407 u32 seq_no = buf_seqno(buf);
1408
1409 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1410 tipc_link_proto_rcv(l_ptr, buf);
1411 return;
1412 }
1413
1414 /* Record OOS packet arrival (force mismatch on next timeout) */
1415 l_ptr->checkpoint--;
1416
1417 /*
1418 * Discard packet if a duplicate; otherwise add it to deferred queue
1419 * and notify peer of gap as per protocol specification
1420 */
1421 if (less(seq_no, mod(l_ptr->next_in_no))) {
1422 l_ptr->stats.duplicates++;
1423 kfree_skb(buf);
1424 return;
1425 }
1426
1427 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1428 &l_ptr->newest_deferred_in, buf)) {
1429 l_ptr->deferred_inqueue_sz++;
1430 l_ptr->stats.deferred_recv++;
1431 TIPC_SKB_CB(buf)->deferred = true;
1432 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1433 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1434 } else
1435 l_ptr->stats.duplicates++;
1436 }
1437
1438 /*
1439 * Send protocol message to the other endpoint.
1440 */
1441 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1442 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1443 {
1444 struct sk_buff *buf = NULL;
1445 struct tipc_msg *msg = l_ptr->pmsg;
1446 u32 msg_size = sizeof(l_ptr->proto_msg);
1447 int r_flag;
1448
1449 /* Discard any previous message that was deferred due to congestion */
1450 if (l_ptr->proto_msg_queue) {
1451 kfree_skb(l_ptr->proto_msg_queue);
1452 l_ptr->proto_msg_queue = NULL;
1453 }
1454
1455 /* Don't send protocol message during link changeover */
1456 if (l_ptr->exp_msg_count)
1457 return;
1458
1459 /* Abort non-RESET send if communication with node is prohibited */
1460 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1461 return;
1462
1463 /* Create protocol message with "out-of-sequence" sequence number */
1464 msg_set_type(msg, msg_typ);
1465 msg_set_net_plane(msg, l_ptr->net_plane);
1466 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1467 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1468
1469 if (msg_typ == STATE_MSG) {
1470 u32 next_sent = mod(l_ptr->next_out_no);
1471
1472 if (!tipc_link_is_up(l_ptr))
1473 return;
1474 if (l_ptr->next_out)
1475 next_sent = buf_seqno(l_ptr->next_out);
1476 msg_set_next_sent(msg, next_sent);
1477 if (l_ptr->oldest_deferred_in) {
1478 u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1479 gap = mod(rec - mod(l_ptr->next_in_no));
1480 }
1481 msg_set_seq_gap(msg, gap);
1482 if (gap)
1483 l_ptr->stats.sent_nacks++;
1484 msg_set_link_tolerance(msg, tolerance);
1485 msg_set_linkprio(msg, priority);
1486 msg_set_max_pkt(msg, ack_mtu);
1487 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1488 msg_set_probe(msg, probe_msg != 0);
1489 if (probe_msg) {
1490 u32 mtu = l_ptr->max_pkt;
1491
1492 if ((mtu < l_ptr->max_pkt_target) &&
1493 link_working_working(l_ptr) &&
1494 l_ptr->fsm_msg_cnt) {
1495 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1496 if (l_ptr->max_pkt_probes == 10) {
1497 l_ptr->max_pkt_target = (msg_size - 4);
1498 l_ptr->max_pkt_probes = 0;
1499 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1500 }
1501 l_ptr->max_pkt_probes++;
1502 }
1503
1504 l_ptr->stats.sent_probes++;
1505 }
1506 l_ptr->stats.sent_states++;
1507 } else { /* RESET_MSG or ACTIVATE_MSG */
1508 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1509 msg_set_seq_gap(msg, 0);
1510 msg_set_next_sent(msg, 1);
1511 msg_set_probe(msg, 0);
1512 msg_set_link_tolerance(msg, l_ptr->tolerance);
1513 msg_set_linkprio(msg, l_ptr->priority);
1514 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1515 }
1516
1517 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1518 msg_set_redundant_link(msg, r_flag);
1519 msg_set_linkprio(msg, l_ptr->priority);
1520 msg_set_size(msg, msg_size);
1521
1522 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1523
1524 buf = tipc_buf_acquire(msg_size);
1525 if (!buf)
1526 return;
1527
1528 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1529 buf->priority = TC_PRIO_CONTROL;
1530
1531 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1532 l_ptr->unacked_window = 0;
1533 kfree_skb(buf);
1534 }
1535
1536 /*
1537 * Receive protocol message :
1538 * Note that network plane id propagates through the network, and may
1539 * change at any time. The node with lowest address rules
1540 */
1541 static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1542 {
1543 u32 rec_gap = 0;
1544 u32 max_pkt_info;
1545 u32 max_pkt_ack;
1546 u32 msg_tol;
1547 struct tipc_msg *msg = buf_msg(buf);
1548
1549 /* Discard protocol message during link changeover */
1550 if (l_ptr->exp_msg_count)
1551 goto exit;
1552
1553 if (l_ptr->net_plane != msg_net_plane(msg))
1554 if (tipc_own_addr > msg_prevnode(msg))
1555 l_ptr->net_plane = msg_net_plane(msg);
1556
1557 switch (msg_type(msg)) {
1558
1559 case RESET_MSG:
1560 if (!link_working_unknown(l_ptr) &&
1561 (l_ptr->peer_session != INVALID_SESSION)) {
1562 if (less_eq(msg_session(msg), l_ptr->peer_session))
1563 break; /* duplicate or old reset: ignore */
1564 }
1565
1566 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1567 link_working_unknown(l_ptr))) {
1568 /*
1569 * peer has lost contact -- don't allow peer's links
1570 * to reactivate before we recognize loss & clean up
1571 */
1572 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1573 }
1574
1575 link_state_event(l_ptr, RESET_MSG);
1576
1577 /* fall thru' */
1578 case ACTIVATE_MSG:
1579 /* Update link settings according other endpoint's values */
1580 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1581
1582 msg_tol = msg_link_tolerance(msg);
1583 if (msg_tol > l_ptr->tolerance)
1584 link_set_supervision_props(l_ptr, msg_tol);
1585
1586 if (msg_linkprio(msg) > l_ptr->priority)
1587 l_ptr->priority = msg_linkprio(msg);
1588
1589 max_pkt_info = msg_max_pkt(msg);
1590 if (max_pkt_info) {
1591 if (max_pkt_info < l_ptr->max_pkt_target)
1592 l_ptr->max_pkt_target = max_pkt_info;
1593 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1594 l_ptr->max_pkt = l_ptr->max_pkt_target;
1595 } else {
1596 l_ptr->max_pkt = l_ptr->max_pkt_target;
1597 }
1598
1599 /* Synchronize broadcast link info, if not done previously */
1600 if (!tipc_node_is_up(l_ptr->owner)) {
1601 l_ptr->owner->bclink.last_sent =
1602 l_ptr->owner->bclink.last_in =
1603 msg_last_bcast(msg);
1604 l_ptr->owner->bclink.oos_state = 0;
1605 }
1606
1607 l_ptr->peer_session = msg_session(msg);
1608 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1609
1610 if (msg_type(msg) == ACTIVATE_MSG)
1611 link_state_event(l_ptr, ACTIVATE_MSG);
1612 break;
1613 case STATE_MSG:
1614
1615 msg_tol = msg_link_tolerance(msg);
1616 if (msg_tol)
1617 link_set_supervision_props(l_ptr, msg_tol);
1618
1619 if (msg_linkprio(msg) &&
1620 (msg_linkprio(msg) != l_ptr->priority)) {
1621 pr_warn("%s<%s>, priority change %u->%u\n",
1622 link_rst_msg, l_ptr->name, l_ptr->priority,
1623 msg_linkprio(msg));
1624 l_ptr->priority = msg_linkprio(msg);
1625 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1626 break;
1627 }
1628
1629 /* Record reception; force mismatch at next timeout: */
1630 l_ptr->checkpoint--;
1631
1632 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1633 l_ptr->stats.recv_states++;
1634 if (link_reset_unknown(l_ptr))
1635 break;
1636
1637 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1638 rec_gap = mod(msg_next_sent(msg) -
1639 mod(l_ptr->next_in_no));
1640 }
1641
1642 max_pkt_ack = msg_max_pkt(msg);
1643 if (max_pkt_ack > l_ptr->max_pkt) {
1644 l_ptr->max_pkt = max_pkt_ack;
1645 l_ptr->max_pkt_probes = 0;
1646 }
1647
1648 max_pkt_ack = 0;
1649 if (msg_probe(msg)) {
1650 l_ptr->stats.recv_probes++;
1651 if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1652 max_pkt_ack = msg_size(msg);
1653 }
1654
1655 /* Protocol message before retransmits, reduce loss risk */
1656 if (l_ptr->owner->bclink.recv_permitted)
1657 tipc_bclink_update_link_state(l_ptr->owner,
1658 msg_last_bcast(msg));
1659
1660 if (rec_gap || (msg_probe(msg))) {
1661 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
1662 0, max_pkt_ack);
1663 }
1664 if (msg_seq_gap(msg)) {
1665 l_ptr->stats.recv_nacks++;
1666 tipc_link_retransmit(l_ptr, l_ptr->first_out,
1667 msg_seq_gap(msg));
1668 }
1669 break;
1670 }
1671 exit:
1672 kfree_skb(buf);
1673 }
1674
1675
1676 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1677 * a different bearer. Owner node is locked.
1678 */
1679 static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1680 struct tipc_msg *tunnel_hdr,
1681 struct tipc_msg *msg,
1682 u32 selector)
1683 {
1684 struct tipc_link *tunnel;
1685 struct sk_buff *buf;
1686 u32 length = msg_size(msg);
1687
1688 tunnel = l_ptr->owner->active_links[selector & 1];
1689 if (!tipc_link_is_up(tunnel)) {
1690 pr_warn("%stunnel link no longer available\n", link_co_err);
1691 return;
1692 }
1693 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1694 buf = tipc_buf_acquire(length + INT_H_SIZE);
1695 if (!buf) {
1696 pr_warn("%sunable to send tunnel msg\n", link_co_err);
1697 return;
1698 }
1699 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
1700 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
1701 __tipc_link_xmit(tunnel, buf);
1702 }
1703
1704
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	/* Empty send queue: send a lone changeover header so the peer
	 * still learns that failover is in progress
	 */
	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	/* Bundles must be split when both links are distinct, since the
	 * bundled messages may have different link selectors
	 */
	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			/* Tunnel each bundled message individually, giving
			 * it the bundle's sequence number
			 */
			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
1767
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
			      struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		/* Close open bundles so no further messages are appended */
		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		/* Tunnel envelope followed by a full copy of the packet */
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		__tipc_link_xmit(tunnel, outbuf);
		/* Abort if the original link went down meanwhile */
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}
1813
1814 /**
1815 * buf_extract - extracts embedded TIPC message from another message
1816 * @skb: encapsulating message buffer
1817 * @from_pos: offset to extract from
1818 *
1819 * Returns a new message buffer containing an embedded message. The
1820 * encapsulating message itself is left unchanged.
1821 */
1822 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
1823 {
1824 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
1825 u32 size = msg_size(msg);
1826 struct sk_buff *eb;
1827
1828 eb = tipc_buf_acquire(size);
1829 if (eb)
1830 skb_copy_to_linear_data(eb, msg, size);
1831 return eb;
1832 }
1833
1834
1835
1836 /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1837 * Owner node is locked.
1838 */
1839 static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
1840 struct sk_buff *t_buf)
1841 {
1842 struct sk_buff *buf;
1843
1844 if (!tipc_link_is_up(l_ptr))
1845 return;
1846
1847 buf = buf_extract(t_buf, INT_H_SIZE);
1848 if (buf == NULL) {
1849 pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
1850 return;
1851 }
1852
1853 /* Add buffer to deferred queue, if applicable: */
1854 link_handle_out_of_seq_msg(l_ptr, buf);
1855 }
1856
/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 * sent during failover of the given link. Returns the extracted inner
 * packet, or NULL if there was none or it was consumed/discarded.
 * Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;

	/* Receiving failover traffic means this endpoint must go down too */
	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		buf = buf_extract(t_buf, INT_H_SIZE);
		if (buf == NULL) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		/* Discard packets this endpoint has already received */
		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	/* Failover complete on a stopped link: release link structure */
	if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
		tipc_node_detach_link(l_ptr->owner, l_ptr);
		kfree(l_ptr);
	}
	return buf;
}
1901
1902 /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
1903 * via other link as result of a failover (ORIGINAL_MSG) or
1904 * a new active link (DUPLICATE_MSG). Failover packets are
1905 * returned to the active link for delivery upwards.
1906 * Owner node is locked.
1907 */
1908 static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
1909 struct sk_buff **buf)
1910 {
1911 struct sk_buff *t_buf = *buf;
1912 struct tipc_link *l_ptr;
1913 struct tipc_msg *t_msg = buf_msg(t_buf);
1914 u32 bearer_id = msg_bearer_id(t_msg);
1915
1916 *buf = NULL;
1917
1918 if (bearer_id >= MAX_BEARERS)
1919 goto exit;
1920
1921 l_ptr = n_ptr->links[bearer_id];
1922 if (!l_ptr)
1923 goto exit;
1924
1925 if (msg_type(t_msg) == DUPLICATE_MSG)
1926 tipc_link_dup_rcv(l_ptr, t_buf);
1927 else if (msg_type(t_msg) == ORIGINAL_MSG)
1928 *buf = tipc_link_failover_rcv(l_ptr, t_buf);
1929 else
1930 pr_warn("%sunknown tunnel pkt received\n", link_co_err);
1931 exit:
1932 kfree_skb(t_buf);
1933 return *buf != NULL;
1934 }
1935
1936 /*
1937 * Bundler functionality:
1938 */
1939 void tipc_link_bundle_rcv(struct sk_buff *buf)
1940 {
1941 u32 msgcount = msg_msgcnt(buf_msg(buf));
1942 u32 pos = INT_H_SIZE;
1943 struct sk_buff *obuf;
1944 struct tipc_msg *omsg;
1945
1946 while (msgcount--) {
1947 obuf = buf_extract(buf, pos);
1948 if (obuf == NULL) {
1949 pr_warn("Link unable to unbundle message(s)\n");
1950 break;
1951 }
1952 omsg = buf_msg(obuf);
1953 pos += align(msg_size(omsg));
1954 if (msg_isdata(omsg)) {
1955 if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
1956 tipc_sk_mcast_rcv(obuf);
1957 else
1958 tipc_sk_rcv(obuf);
1959 } else if (msg_user(omsg) == CONN_MANAGER) {
1960 tipc_sk_rcv(obuf);
1961 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
1962 tipc_named_rcv(obuf);
1963 } else {
1964 pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
1965 kfree_skb(obuf);
1966 }
1967 }
1968 kfree_skb(buf);
1969 }
1970
1971 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
1972 {
1973 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
1974 return;
1975
1976 l_ptr->tolerance = tolerance;
1977 l_ptr->continuity_interval =
1978 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
1979 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
1980 }
1981
1982 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
1983 {
1984 /* Data messages from this node, inclusive FIRST_FRAGM */
1985 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
1986 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
1987 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
1988 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
1989 /* Transiting data messages,inclusive FIRST_FRAGM */
1990 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
1991 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
1992 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
1993 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
1994 l_ptr->queue_limit[CONN_MANAGER] = 1200;
1995 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
1996 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
1997 /* FRAGMENT and LAST_FRAGMENT packets */
1998 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
1999 }
2000
2001 /* tipc_link_find_owner - locate owner node of link by link's name
2002 * @name: pointer to link name string
2003 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2004 *
2005 * Returns pointer to node owning the link, or 0 if no matching link is found.
2006 */
2007 static struct tipc_node *tipc_link_find_owner(const char *link_name,
2008 unsigned int *bearer_id)
2009 {
2010 struct tipc_link *l_ptr;
2011 struct tipc_node *n_ptr;
2012 struct tipc_node *found_node = 0;
2013 int i;
2014
2015 *bearer_id = 0;
2016 rcu_read_lock();
2017 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
2018 tipc_node_lock(n_ptr);
2019 for (i = 0; i < MAX_BEARERS; i++) {
2020 l_ptr = n_ptr->links[i];
2021 if (l_ptr && !strcmp(l_ptr->name, link_name)) {
2022 *bearer_id = i;
2023 found_node = n_ptr;
2024 break;
2025 }
2026 }
2027 tipc_node_unlock(n_ptr);
2028 if (found_node)
2029 break;
2030 }
2031 rcu_read_unlock();
2032
2033 return found_node;
2034 }
2035
2036 /**
2037 * link_value_is_valid -- validate proposed link tolerance/priority/window
2038 *
2039 * @cmd: value type (TIPC_CMD_SET_LINK_*)
2040 * @new_value: the new value
2041 *
2042 * Returns 1 if value is within range, 0 if not.
2043 */
2044 static int link_value_is_valid(u16 cmd, u32 new_value)
2045 {
2046 switch (cmd) {
2047 case TIPC_CMD_SET_LINK_TOL:
2048 return (new_value >= TIPC_MIN_LINK_TOL) &&
2049 (new_value <= TIPC_MAX_LINK_TOL);
2050 case TIPC_CMD_SET_LINK_PRI:
2051 return (new_value <= TIPC_MAX_LINK_PRI);
2052 case TIPC_CMD_SET_LINK_WINDOW:
2053 return (new_value >= TIPC_MIN_LINK_WIN) &&
2054 (new_value <= TIPC_MAX_LINK_WIN);
2055 }
2056 return 0;
2057 }
2058
2059 /**
2060 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2061 * @name: ptr to link, bearer, or media name
2062 * @new_value: new value of link, bearer, or media setting
2063 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2064 *
2065 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
2066 *
2067 * Returns 0 if value updated and negative value on error.
2068 */
2069 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2070 {
2071 struct tipc_node *node;
2072 struct tipc_link *l_ptr;
2073 struct tipc_bearer *b_ptr;
2074 struct tipc_media *m_ptr;
2075 int bearer_id;
2076 int res = 0;
2077
2078 node = tipc_link_find_owner(name, &bearer_id);
2079 if (node) {
2080 tipc_node_lock(node);
2081 l_ptr = node->links[bearer_id];
2082
2083 if (l_ptr) {
2084 switch (cmd) {
2085 case TIPC_CMD_SET_LINK_TOL:
2086 link_set_supervision_props(l_ptr, new_value);
2087 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
2088 new_value, 0, 0);
2089 break;
2090 case TIPC_CMD_SET_LINK_PRI:
2091 l_ptr->priority = new_value;
2092 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
2093 0, new_value, 0);
2094 break;
2095 case TIPC_CMD_SET_LINK_WINDOW:
2096 tipc_link_set_queue_limits(l_ptr, new_value);
2097 break;
2098 default:
2099 res = -EINVAL;
2100 break;
2101 }
2102 }
2103 tipc_node_unlock(node);
2104 return res;
2105 }
2106
2107 b_ptr = tipc_bearer_find(name);
2108 if (b_ptr) {
2109 switch (cmd) {
2110 case TIPC_CMD_SET_LINK_TOL:
2111 b_ptr->tolerance = new_value;
2112 break;
2113 case TIPC_CMD_SET_LINK_PRI:
2114 b_ptr->priority = new_value;
2115 break;
2116 case TIPC_CMD_SET_LINK_WINDOW:
2117 b_ptr->window = new_value;
2118 break;
2119 default:
2120 res = -EINVAL;
2121 break;
2122 }
2123 return res;
2124 }
2125
2126 m_ptr = tipc_media_find(name);
2127 if (!m_ptr)
2128 return -ENODEV;
2129 switch (cmd) {
2130 case TIPC_CMD_SET_LINK_TOL:
2131 m_ptr->tolerance = new_value;
2132 break;
2133 case TIPC_CMD_SET_LINK_PRI:
2134 m_ptr->priority = new_value;
2135 break;
2136 case TIPC_CMD_SET_LINK_WINDOW:
2137 m_ptr->window = new_value;
2138 break;
2139 default:
2140 res = -EINVAL;
2141 break;
2142 }
2143 return res;
2144 }
2145
2146 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2147 u16 cmd)
2148 {
2149 struct tipc_link_config *args;
2150 u32 new_value;
2151 int res;
2152
2153 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2154 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2155
2156 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2157 new_value = ntohl(args->value);
2158
2159 if (!link_value_is_valid(cmd, new_value))
2160 return tipc_cfg_reply_error_string(
2161 "cannot change, value invalid");
2162
2163 if (!strcmp(args->name, tipc_bclink_name)) {
2164 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2165 (tipc_bclink_set_queue_limits(new_value) == 0))
2166 return tipc_cfg_reply_none();
2167 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2168 " (cannot change setting on broadcast link)");
2169 }
2170
2171 res = link_cmd_set_value(args->name, new_value, cmd);
2172 if (res)
2173 return tipc_cfg_reply_error_string("cannot change link setting");
2174
2175 return tipc_cfg_reply_none();
2176 }
2177
2178 /**
2179 * link_reset_statistics - reset link statistics
2180 * @l_ptr: pointer to link
2181 */
2182 static void link_reset_statistics(struct tipc_link *l_ptr)
2183 {
2184 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2185 l_ptr->stats.sent_info = l_ptr->next_out_no;
2186 l_ptr->stats.recv_info = l_ptr->next_in_no;
2187 }
2188
2189 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2190 {
2191 char *link_name;
2192 struct tipc_link *l_ptr;
2193 struct tipc_node *node;
2194 unsigned int bearer_id;
2195
2196 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2197 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2198
2199 link_name = (char *)TLV_DATA(req_tlv_area);
2200 if (!strcmp(link_name, tipc_bclink_name)) {
2201 if (tipc_bclink_reset_stats())
2202 return tipc_cfg_reply_error_string("link not found");
2203 return tipc_cfg_reply_none();
2204 }
2205 node = tipc_link_find_owner(link_name, &bearer_id);
2206 if (!node)
2207 return tipc_cfg_reply_error_string("link not found");
2208
2209 tipc_node_lock(node);
2210 l_ptr = node->links[bearer_id];
2211 if (!l_ptr) {
2212 tipc_node_unlock(node);
2213 return tipc_cfg_reply_error_string("link not found");
2214 }
2215 link_reset_statistics(l_ptr);
2216 tipc_node_unlock(node);
2217 return tipc_cfg_reply_none();
2218 }
2219
2220 /**
2221 * percent - convert count to a percentage of total (rounding up or down)
2222 */
2223 static u32 percent(u32 count, u32 total)
2224 {
2225 return (count * 100 + (total / 2)) / total;
2226 }
2227
2228 /**
2229 * tipc_link_stats - print link statistics
2230 * @name: link name
2231 * @buf: print buffer area
2232 * @buf_size: size of print buffer area
2233 *
2234 * Returns length of print buffer data string (or 0 if error)
2235 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	unsigned int bearer_id;
	int ret;

	/* The broadcast link prints its own statistics */
	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return 0;

	tipc_node_lock(node);

	/* Link slot may be empty by the time the node lock is taken */
	l = node->links[bearer_id];
	if (!l) {
		tipc_node_unlock(node);
		return 0;
	}

	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
			    "  Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	/* RX/TX packet counts are deltas from the last stats reset */
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	/* Avoid division by zero when no messages have been profiled */
	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX profile sample:%u packets  average:%u octets\n"
			     "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	return ret;
}
2325
2326 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2327 {
2328 struct sk_buff *buf;
2329 struct tlv_desc *rep_tlv;
2330 int str_len;
2331 int pb_len;
2332 char *pb;
2333
2334 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2335 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2336
2337 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2338 if (!buf)
2339 return NULL;
2340
2341 rep_tlv = (struct tlv_desc *)buf->data;
2342 pb = TLV_DATA(rep_tlv);
2343 pb_len = ULTRA_STRING_MAX_LEN;
2344 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2345 pb, pb_len);
2346 if (!str_len) {
2347 kfree_skb(buf);
2348 return tipc_cfg_reply_error_string("link not found");
2349 }
2350 str_len += 1; /* for "\0" */
2351 skb_put(buf, TLV_SPACE(str_len));
2352 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2353
2354 return buf;
2355 }
2356
2357 /**
2358 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2359 * @dest: network address of destination node
2360 * @selector: used to select from set of active links
2361 *
2362 * If no active link can be found, uses default maximum packet size.
2363 */
2364 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2365 {
2366 struct tipc_node *n_ptr;
2367 struct tipc_link *l_ptr;
2368 u32 res = MAX_PKT_DEFAULT;
2369
2370 if (dest == tipc_own_addr)
2371 return MAX_MSG_SIZE;
2372
2373 n_ptr = tipc_node_find(dest);
2374 if (n_ptr) {
2375 tipc_node_lock(n_ptr);
2376 l_ptr = n_ptr->active_links[selector & 1];
2377 if (l_ptr)
2378 res = l_ptr->max_pkt;
2379 tipc_node_unlock(n_ptr);
2380 }
2381 return res;
2382 }
2383
/* link_print - emit a one-line diagnostic for a link, including its
 * owning bearer name (if still present) and current FSM state code.
 */
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_bearer *b_ptr;

	/* bearer_list entries are RCU-protected; dereference under lock */
	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	/* Append the link state abbreviation via pr_cont */
	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}
2405
2406 /* Parse and validate nested (link) properties valid for media, bearer and link
2407 */
2408 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2409 {
2410 int err;
2411
2412 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
2413 tipc_nl_prop_policy);
2414 if (err)
2415 return err;
2416
2417 if (props[TIPC_NLA_PROP_PRIO]) {
2418 u32 prio;
2419
2420 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2421 if (prio > TIPC_MAX_LINK_PRI)
2422 return -EINVAL;
2423 }
2424
2425 if (props[TIPC_NLA_PROP_TOL]) {
2426 u32 tol;
2427
2428 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2429 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2430 return -EINVAL;
2431 }
2432
2433 if (props[TIPC_NLA_PROP_WIN]) {
2434 u32 win;
2435
2436 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2437 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2438 return -EINVAL;
2439 }
2440
2441 return 0;
2442 }
2443
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	/* The link name attribute is mandatory */
	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	/* Slot may be empty by the time the node lock is taken */
	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		/* Validate all requested properties before applying any */
		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			/* Advertise the new tolerance to the peer */
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			/* Advertise the new priority to the peer */
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
2517
/* __tipc_nl_add_stats - append a TIPC_NLA_LINK_STATS nest filled from the
 * link's statistics counters. Returns 0 on success or -EMSGSIZE if the
 * message buffer cannot hold the nest (in which case it is cancelled).
 */
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	/* One (netlink attribute, counter value) pair per entry */
	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		/* Substitute 1 when no messages were profiled, presumably so
		 * consumers can divide by this total safely (mirrors the
		 * profile_total guard in tipc_link_stats) — verify.
		 */
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		/* Average queue size, guarded against division by zero */
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	/* Roll back the partially filled nest */
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
2582
2583 /* Caller should hold appropriate locks to protect the link */
2584 static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
2585 {
2586 int err;
2587 void *hdr;
2588 struct nlattr *attrs;
2589 struct nlattr *prop;
2590
2591 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
2592 NLM_F_MULTI, TIPC_NL_LINK_GET);
2593 if (!hdr)
2594 return -EMSGSIZE;
2595
2596 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2597 if (!attrs)
2598 goto msg_full;
2599
2600 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2601 goto attr_msg_full;
2602 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2603 tipc_cluster_mask(tipc_own_addr)))
2604 goto attr_msg_full;
2605 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
2606 goto attr_msg_full;
2607 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
2608 goto attr_msg_full;
2609 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
2610 goto attr_msg_full;
2611
2612 if (tipc_link_is_up(link))
2613 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2614 goto attr_msg_full;
2615 if (tipc_link_is_active(link))
2616 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2617 goto attr_msg_full;
2618
2619 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2620 if (!prop)
2621 goto attr_msg_full;
2622 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2623 goto prop_msg_full;
2624 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2625 goto prop_msg_full;
2626 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2627 link->queue_limit[TIPC_LOW_IMPORTANCE]))
2628 goto prop_msg_full;
2629 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2630 goto prop_msg_full;
2631 nla_nest_end(msg->skb, prop);
2632
2633 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2634 if (err)
2635 goto attr_msg_full;
2636
2637 nla_nest_end(msg->skb, attrs);
2638 genlmsg_end(msg->skb, hdr);
2639
2640 return 0;
2641
2642 prop_msg_full:
2643 nla_nest_cancel(msg->skb, prop);
2644 attr_msg_full:
2645 nla_nest_cancel(msg->skb, attrs);
2646 msg_full:
2647 genlmsg_cancel(msg->skb, hdr);
2648
2649 return -EMSGSIZE;
2650 }
2651
2652 /* Caller should hold node lock */
2653 static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
2654 struct tipc_node *node,
2655 u32 *prev_link)
2656 {
2657 u32 i;
2658 int err;
2659
2660 for (i = *prev_link; i < MAX_BEARERS; i++) {
2661 *prev_link = i;
2662
2663 if (!node->links[i])
2664 continue;
2665
2666 err = __tipc_nl_add_link(msg, node->links[i]);
2667 if (err)
2668 return err;
2669 }
2670 *prev_link = 0;
2671
2672 return 0;
2673 }
2674
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	/* cb->args[] hold the dump cursor across invocations:
	 * [0] last visited node address, [1] next link index, [2] done flag
	 */
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
		/* Resuming: find the node where the previous pass stopped */
		node = tipc_node_find(prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

		list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(&msg, node, &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* Fresh dump: broadcast link first, then every node */
		err = tipc_nl_add_bc_link(&msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tipc_node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(&msg, node, &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	/* Persist the cursor for the next invocation */
	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
2740
2741 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2742 {
2743 struct sk_buff *ans_skb;
2744 struct tipc_nl_msg msg;
2745 struct tipc_link *link;
2746 struct tipc_node *node;
2747 char *name;
2748 int bearer_id;
2749 int err;
2750
2751 if (!info->attrs[TIPC_NLA_LINK_NAME])
2752 return -EINVAL;
2753
2754 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2755 node = tipc_link_find_owner(name, &bearer_id);
2756 if (!node)
2757 return -EINVAL;
2758
2759 ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2760 if (!ans_skb)
2761 return -ENOMEM;
2762
2763 msg.skb = ans_skb;
2764 msg.portid = info->snd_portid;
2765 msg.seq = info->snd_seq;
2766
2767 tipc_node_lock(node);
2768 link = node->links[bearer_id];
2769 if (!link) {
2770 err = -EINVAL;
2771 goto err_out;
2772 }
2773
2774 err = __tipc_nl_add_link(&msg, link);
2775 if (err)
2776 goto err_out;
2777
2778 tipc_node_unlock(node);
2779
2780 return genlmsg_reply(ans_skb, info);
2781
2782 err_out:
2783 tipc_node_unlock(node);
2784 nlmsg_free(ans_skb);
2785
2786 return err;
2787 }
2788
2789 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2790 {
2791 int err;
2792 char *link_name;
2793 unsigned int bearer_id;
2794 struct tipc_link *link;
2795 struct tipc_node *node;
2796 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2797
2798 if (!info->attrs[TIPC_NLA_LINK])
2799 return -EINVAL;
2800
2801 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2802 info->attrs[TIPC_NLA_LINK],
2803 tipc_nl_link_policy);
2804 if (err)
2805 return err;
2806
2807 if (!attrs[TIPC_NLA_LINK_NAME])
2808 return -EINVAL;
2809
2810 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2811
2812 if (strcmp(link_name, tipc_bclink_name) == 0) {
2813 err = tipc_bclink_reset_stats();
2814 if (err)
2815 return err;
2816 return 0;
2817 }
2818
2819 node = tipc_link_find_owner(link_name, &bearer_id);
2820 if (!node)
2821 return -EINVAL;
2822
2823 tipc_node_lock(node);
2824
2825 link = node->links[bearer_id];
2826 if (!link) {
2827 tipc_node_unlock(node);
2828 return -EINVAL;
2829 }
2830
2831 link_reset_statistics(link);
2832
2833 tipc_node_unlock(node);
2834
2835 return 0;
2836 }
This page took 0.08575 seconds and 4 git commands to generate.