tipc: relocate/coalesce node cast in tipc_named_node_up
[deliverable/linux.git] / net / tipc / link.c
CommitLineData
b97bf3fd
PL
1/*
2 * net/tipc/link.c: TIPC link code
c4307285 3 *
05646c91 4 * Copyright (c) 1996-2007, Ericsson AB
23dd4cce 5 * Copyright (c) 2004-2007, 2010-2011, Wind River Systems
b97bf3fd
PL
6 * All rights reserved.
7 *
9ea1fd3c 8 * Redistribution and use in source and binary forms, with or without
b97bf3fd
PL
9 * modification, are permitted provided that the following conditions are met:
10 *
9ea1fd3c
PL
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
b97bf3fd 19 *
9ea1fd3c
PL
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
b97bf3fd
PL
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
b97bf3fd 38#include "link.h"
b97bf3fd 39#include "port.h"
b97bf3fd 40#include "name_distr.h"
b97bf3fd
PL
41#include "discover.h"
42#include "config.h"
b97bf3fd
PL
43
44
a686e685
AS
45/*
46 * Out-of-range value for link session numbers
47 */
48
49#define INVALID_SESSION 0x10000
50
c4307285
YH
51/*
52 * Link state events:
b97bf3fd
PL
53 */
54
55#define STARTING_EVT 856384768 /* link processing trigger */
56#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
57#define TIMEOUT_EVT 560817u /* link timer expired */
58
c4307285
YH
59/*
60 * The following two 'message types' is really just implementation
61 * data conveniently stored in the message header.
b97bf3fd
PL
62 * They must not be considered part of the protocol
63 */
64#define OPEN_MSG 0
65#define CLOSED_MSG 1
66
c4307285 67/*
b97bf3fd
PL
68 * State value stored in 'exp_msg_count'
69 */
70
71#define START_CHANGEOVER 100000u
72
/**
 * struct link_name - deconstructed link name
 * @addr_local: network address of node at this end
 * @if_local: name of interface at this end
 * @addr_peer: network address of node at far end
 * @if_peer: name of interface at far end
 */

struct link_name {
	u32 addr_local;
	char if_local[TIPC_MAX_IF_NAME];
	u32 addr_peer;
	char if_peer[TIPC_MAX_IF_NAME];
};
/* Forward declarations of file-local helper routines */

static void link_handle_out_of_seq_msg(struct link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
static int  link_send_sections_long(struct tipc_port *sender,
				    struct iovec const *msg_sect,
				    u32 num_sect, unsigned int total_len,
				    u32 destnode);
static void link_check_defragm_bufs(struct link *l_ptr);
static void link_state_event(struct link *l_ptr, u32 event);
static void link_reset_statistics(struct link *l_ptr);
static void link_print(struct link *l_ptr, const char *str);
static void link_start(struct link *l_ptr);
static int  link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);

/*
 * Simple link routines
 */

/* Round @i up to the next multiple of 4 (TIPC buffers are word-aligned) */
static unsigned int align(unsigned int i)
{
	unsigned int rem = i % 4u;

	return rem ? i + (4u - rem) : i;
}

05790c64 113static void link_init_max_pkt(struct link *l_ptr)
b97bf3fd
PL
114{
115 u32 max_pkt;
c4307285 116
2d627b92 117 max_pkt = (l_ptr->b_ptr->mtu & ~3);
b97bf3fd
PL
118 if (max_pkt > MAX_MSG_SIZE)
119 max_pkt = MAX_MSG_SIZE;
120
c4307285 121 l_ptr->max_pkt_target = max_pkt;
b97bf3fd
PL
122 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
123 l_ptr->max_pkt = l_ptr->max_pkt_target;
c4307285 124 else
b97bf3fd
PL
125 l_ptr->max_pkt = MAX_PKT_DEFAULT;
126
c4307285 127 l_ptr->max_pkt_probes = 0;
b97bf3fd
PL
128}
129
05790c64 130static u32 link_next_sent(struct link *l_ptr)
b97bf3fd
PL
131{
132 if (l_ptr->next_out)
133 return msg_seqno(buf_msg(l_ptr->next_out));
134 return mod(l_ptr->next_out_no);
135}
136
05790c64 137static u32 link_last_sent(struct link *l_ptr)
b97bf3fd
PL
138{
139 return mod(link_next_sent(l_ptr) - 1);
140}
141
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */

/* Return non-zero if the link is in a usable (working) state */
int tipc_link_is_up(struct link *l_ptr)
{
	return l_ptr &&
	       (link_working_working(l_ptr) || link_working_unknown(l_ptr));
}

4323add6 153int tipc_link_is_active(struct link *l_ptr)
b97bf3fd 154{
a02cec21
ED
155 return (l_ptr->owner->active_links[0] == l_ptr) ||
156 (l_ptr->owner->active_links[1] == l_ptr);
b97bf3fd
PL
157}
158
/**
 * link_name_validate - validate & (optionally) deconstruct link name
 * @name - ptr to link name string ("Z.C.N:if-Z.C.N:if" format)
 * @name_parts - ptr to area for link name components (or NULL if not needed)
 *
 * Returns 1 if link name is valid, otherwise 0.
 */

static int link_name_validate(const char *name, struct link_name *name_parts)
{
	char name_copy[TIPC_MAX_LINK_NAME];
	char *addr_local;
	char *if_local;
	char *addr_peer;
	char *if_peer;
	char dummy;
	u32 z_local, c_local, n_local;
	u32 z_peer, c_peer, n_peer;
	u32 if_local_len;
	u32 if_peer_len;

	/* copy link name & ensure length is OK */

	name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
	if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
		return 0;	/* name too long to be valid */

	/* ensure all component parts of link name are present */

	addr_local = name_copy;
	if_local = strchr(addr_local, ':');
	if (if_local == NULL)
		return 0;
	*(if_local++) = 0;	/* split local address from local interface */
	addr_peer = strchr(if_local, '-');
	if (addr_peer == NULL)
		return 0;
	*(addr_peer++) = 0;	/* split local half from peer half */
	if_local_len = addr_peer - if_local;
	if_peer = strchr(addr_peer, ':');
	if (if_peer == NULL)
		return 0;
	*(if_peer++) = 0;	/* split peer address from peer interface */
	if_peer_len = strlen(if_peer) + 1;

	/* validate component parts of link name */

	if ((sscanf(addr_local, "%u.%u.%u%c",
		    &z_local, &c_local, &n_local, &dummy) != 3) ||
	    (sscanf(addr_peer, "%u.%u.%u%c",
		    &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
	    (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
	    (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
	    (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
	    (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
	    (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
	    (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
		return 0;

	/* return link name components, if necessary */

	if (name_parts) {
		name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
		strcpy(name_parts->if_local, if_local);
		name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
		strcpy(name_parts->if_peer, if_peer);
	}
	return 1;
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */

static void link_timeout(struct link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */

	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		/* for fragmented messages, profile the original message size */
		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			/* bucket message length into a power-of-4-ish histogram */
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_check_defragm_bufs(l_ptr);

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}

05790c64 290static void link_set_timer(struct link *l_ptr, u32 time)
b97bf3fd
PL
291{
292 k_start_timer(&l_ptr->timer, time);
293}
294
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link, or NULL on error (limit exceeded or no memory).
 */

struct link *tipc_link_create(struct tipc_node *n_ptr,
			      struct tipc_bearer *b_ptr,
			      const struct tipc_media_addr *media_addr)
{
	struct link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	/* a node supports at most two links (for redundancy/load sharing) */
	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	/* only one link per bearer to a given node */
	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		err("Attempt to establish second link on <%s> to %s\n",
		    b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;	/* accept any session while down */
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	/* pre-build the protocol message template reused for this link */
	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	/* defer FSM startup to tasklet context via signal handler */
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */

void tipc_link_delete(struct link *l_ptr)
{
	if (!l_ptr) {
		err("Attempt to delete non-existent link\n");
		return;
	}

	/* cancel timer BEFORE taking node lock (see note above) */
	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);	/* free all queued messages */
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}

31e3c3f6 404static void link_start(struct link *l_ptr)
b97bf3fd 405{
214dda4a 406 tipc_node_lock(l_ptr->owner);
b97bf3fd 407 link_state_event(l_ptr, STARTING_EVT);
214dda4a 408 tipc_node_unlock(l_ptr->owner);
b97bf3fd
PL
409}
410
/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 *
 * Always returns -ELINKCONG so the caller can propagate the congestion error.
 */

static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;	/* port has no wakeup handler */
		if (!list_empty(&p_ptr->wait_list))
			goto exit;	/* already waiting on some link */
		p_ptr->congested = 1;
		/* worst-case number of packets needed to send @sz bytes */
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}

/**
 * tipc_link_wakeup_ports - wake ports waiting for the link's congestion to abate
 * @l_ptr: pointer to link
 * @all: non-zero to wake all waiting ports regardless of available window
 */
void tipc_link_wakeup_ports(struct link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	/* window = remaining send-queue capacity at lowest importance level */
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;	/* effectively unlimited */
	if (win <= 0)
		return;
	/* trylock: don't spin in this (possibly softirq) context */
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}

c4307285 473/**
b97bf3fd
PL
474 * link_release_outqueue - purge link's outbound message queue
475 * @l_ptr: pointer to link
476 */
477
478static void link_release_outqueue(struct link *l_ptr)
479{
480 struct sk_buff *buf = l_ptr->first_out;
481 struct sk_buff *next;
482
483 while (buf) {
484 next = buf->next;
485 buf_discard(buf);
486 buf = next;
487 }
488 l_ptr->first_out = NULL;
489 l_ptr->out_queue_size = 0;
490}
491
492/**
4323add6 493 * tipc_link_reset_fragments - purge link's inbound message fragments queue
b97bf3fd
PL
494 * @l_ptr: pointer to link
495 */
496
4323add6 497void tipc_link_reset_fragments(struct link *l_ptr)
b97bf3fd
PL
498{
499 struct sk_buff *buf = l_ptr->defragm_buf;
500 struct sk_buff *next;
501
502 while (buf) {
503 next = buf->next;
504 buf_discard(buf);
505 buf = next;
506 }
507 l_ptr->defragm_buf = NULL;
508}
509
c4307285 510/**
4323add6 511 * tipc_link_stop - purge all inbound and outbound messages associated with link
b97bf3fd
PL
512 * @l_ptr: pointer to link
513 */
514
4323add6 515void tipc_link_stop(struct link *l_ptr)
b97bf3fd
PL
516{
517 struct sk_buff *buf;
518 struct sk_buff *next;
519
520 buf = l_ptr->oldest_deferred_in;
521 while (buf) {
522 next = buf->next;
523 buf_discard(buf);
524 buf = next;
525 }
526
527 buf = l_ptr->first_out;
528 while (buf) {
529 next = buf->next;
530 buf_discard(buf);
531 buf = next;
532 }
533
4323add6 534 tipc_link_reset_fragments(l_ptr);
b97bf3fd
PL
535
536 buf_discard(l_ptr->proto_msg_queue);
537 l_ptr->proto_msg_queue = NULL;
538}
539
/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
/* no-op placeholder so event-notification call sites compile away cleanly */
#define link_send_event(fcn, l_ptr, up) do { } while (0)

/**
 * tipc_link_reset - reset a link to its initial (down) state
 * @l_ptr: pointer to link
 *
 * Purges all queues, notifies node/bearer layers if the link was up, and
 * prepares for changeover if a redundant active link remains.
 */
void tipc_link_reset(struct link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	/* already reset — nothing further to tear down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	/* arm changeover bookkeeping if traffic can fail over to another link */
	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */

	link_release_outqueue(l_ptr);
	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);

	link_send_event(tipc_cfg_link_event, l_ptr, 0);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(tipc_disc_link_event, l_ptr, 0);
}

606
/* Mark link up: reset receive counters and notify node/bearer layers */
static void link_activate(struct link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
	link_send_event(tipc_cfg_link_event, l_ptr, 1);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(tipc_disc_link_event, l_ptr, 1);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 *
 * States: WORKING_WORKING (up, traffic seen), WORKING_UNKNOWN (up, probing),
 * RESET_UNKNOWN (down, peer state unknown), RESET_RESET (down, peer reset).
 */

static void link_state_event(struct link *l_ptr, unsigned event)
{
	struct link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;	  /* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* traffic arrived since last check — link healthy */
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					/* probe for larger usable packet size */
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			/* no traffic seen — start probing at faster interval */
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer\n",
			     l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in WW state\n", event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			/* peer is alive — back to fully working */
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			info("Resetting link <%s>, requested by peer "
			     "while probing\n", l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				/* traffic arrived during probe — recover */
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				/* keep probing until abort limit is reached */
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				warn("Resetting link <%s>, peer not responding\n",
				     l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			err("Unknown link event %u in WU state\n", event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			/* defer activation while the other link is still probing */
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RU state\n", event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			/* defer activation while the other link is still probing */
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			err("Unknown link event %u in RR state\n", event);
		}
		break;
	default:
		err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}

/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 *
 * Returns 1 (and consumes @buf) on success, 0 if @buf cannot be bundled.
 */

static int link_bundle_buf(struct link *l_ptr,
			   struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);	/* bundled msgs are 4-byte aligned */
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;	/* bundle already closed for additions */
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;	/* would exceed link packet size */

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	buf_discard(buf);	/* contents now live in the bundle */
	l_ptr->stats.sent_bundled++;
	return 1;
}

/* Stamp ack/seqno fields into @msg and append @buf to the link's out queue */
static void link_add_to_outqueue(struct link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);	/* last in-sequence msg received */
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	/* track high-water mark for statistics */
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}

/* Append a chain of (fragment) buffers to the out queue, tagging each with
 * the given long-message number so the receiver can reassemble them.
 */
static void link_add_chain_to_outqueue(struct link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}

c4307285
YH
866/*
867 * tipc_link_send_buf() is the 'full path' for messages, called from
b97bf3fd
PL
868 * inside TIPC when the 'fast path' in tipc_send_buf
869 * has failed, and from link_send()
870 */
871
4323add6 872int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
b97bf3fd
PL
873{
874 struct tipc_msg *msg = buf_msg(buf);
875 u32 size = msg_size(msg);
876 u32 dsz = msg_data_sz(msg);
877 u32 queue_size = l_ptr->out_queue_size;
c68ca7b7 878 u32 imp = tipc_msg_tot_importance(msg);
b97bf3fd 879 u32 queue_limit = l_ptr->queue_limit[imp];
15e979da 880 u32 max_packet = l_ptr->max_pkt;
b97bf3fd
PL
881
882 msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
883
884 /* Match msg importance against queue limits: */
885
886 if (unlikely(queue_size >= queue_limit)) {
887 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
bebc55ae
AS
888 link_schedule_port(l_ptr, msg_origport(msg), size);
889 buf_discard(buf);
890 return -ELINKCONG;
b97bf3fd 891 }
b97bf3fd
PL
892 buf_discard(buf);
893 if (imp > CONN_MANAGER) {
a10bd924 894 warn("Resetting link <%s>, send queue full", l_ptr->name);
4323add6 895 tipc_link_reset(l_ptr);
b97bf3fd
PL
896 }
897 return dsz;
898 }
899
900 /* Fragmentation needed ? */
901
902 if (size > max_packet)
31e3c3f6 903 return link_send_long_buf(l_ptr, buf);
b97bf3fd
PL
904
905 /* Packet can be queued or sent: */
906
c4307285 907 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
b97bf3fd
PL
908 !link_congested(l_ptr))) {
909 link_add_to_outqueue(l_ptr, buf, msg);
910
4323add6 911 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
b97bf3fd
PL
912 l_ptr->unacked_window = 0;
913 } else {
4323add6 914 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
915 l_ptr->stats.bearer_congs++;
916 l_ptr->next_out = buf;
917 }
918 return dsz;
919 }
920 /* Congestion: can message be bundled ?: */
921
922 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
923 (msg_user(msg) != MSG_FRAGMENTER)) {
924
925 /* Try adding message to an existing bundle */
926
c4307285 927 if (l_ptr->next_out &&
b97bf3fd 928 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
4323add6 929 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
930 return dsz;
931 }
932
933 /* Try creating a new bundle */
934
935 if (size <= max_packet * 2 / 3) {
31e3c3f6 936 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
b97bf3fd
PL
937 struct tipc_msg bundler_hdr;
938
939 if (bundler) {
c68ca7b7 940 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
75715217 941 INT_H_SIZE, l_ptr->addr);
27d7ff46
ACM
942 skb_copy_to_linear_data(bundler, &bundler_hdr,
943 INT_H_SIZE);
b97bf3fd
PL
944 skb_trim(bundler, INT_H_SIZE);
945 link_bundle_buf(l_ptr, bundler, buf);
946 buf = bundler;
947 msg = buf_msg(buf);
948 l_ptr->stats.sent_bundles++;
949 }
950 }
951 }
952 if (!l_ptr->next_out)
953 l_ptr->next_out = buf;
954 link_add_to_outqueue(l_ptr, buf, msg);
4323add6 955 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
956 return dsz;
957}
958
/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 *
 * Consumes @buf.  Returns result of tipc_link_send_buf(), or -ELINKCONG
 * if no usable link/node was found (buffer is then dropped).
 */

int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		/* selector low bit spreads traffic over the two active links */
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			buf_discard(buf);
		tipc_node_unlock(n_ptr);
	} else {
		buf_discard(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}

c4307285
YH
988/*
989 * link_send_buf_fast: Entry for data messages where the
b97bf3fd
PL
990 * destination link is known and the header is complete,
991 * inclusive total message length. Very time critical.
992 * Link is locked. Returns user data length.
993 */
994
05790c64
SR
995static int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
996 u32 *used_max_pkt)
b97bf3fd
PL
997{
998 struct tipc_msg *msg = buf_msg(buf);
999 int res = msg_data_sz(msg);
1000
1001 if (likely(!link_congested(l_ptr))) {
15e979da 1002 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
b97bf3fd
PL
1003 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1004 link_add_to_outqueue(l_ptr, buf, msg);
4323add6
PL
1005 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1006 &l_ptr->media_addr))) {
b97bf3fd 1007 l_ptr->unacked_window = 0;
b97bf3fd
PL
1008 return res;
1009 }
4323add6 1010 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
1011 l_ptr->stats.bearer_congs++;
1012 l_ptr->next_out = buf;
1013 return res;
1014 }
0e65967e 1015 } else
15e979da 1016 *used_max_pkt = l_ptr->max_pkt;
b97bf3fd 1017 }
4323add6 1018 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
b97bf3fd
PL
1019}
1020
c4307285
YH
1021/*
1022 * tipc_send_buf_fast: Entry for data messages where the
b97bf3fd
PL
1023 * destination node is known and the header is complete,
1024 * inclusive total message length.
1025 * Returns user data length.
1026 */
1027int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1028{
1029 struct link *l_ptr;
6c00055a 1030 struct tipc_node *n_ptr;
b97bf3fd
PL
1031 int res;
1032 u32 selector = msg_origport(buf_msg(buf)) & 1;
1033 u32 dummy;
1034
4323add6 1035 read_lock_bh(&tipc_net_lock);
51a8e4de 1036 n_ptr = tipc_node_find(destnode);
b97bf3fd 1037 if (likely(n_ptr)) {
4323add6 1038 tipc_node_lock(n_ptr);
b97bf3fd 1039 l_ptr = n_ptr->active_links[selector];
b97bf3fd
PL
1040 if (likely(l_ptr)) {
1041 res = link_send_buf_fast(l_ptr, buf, &dummy);
4323add6
PL
1042 tipc_node_unlock(n_ptr);
1043 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
1044 return res;
1045 }
4323add6 1046 tipc_node_unlock(n_ptr);
b97bf3fd 1047 }
4323add6 1048 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
1049 res = msg_data_sz(buf_msg(buf));
1050 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1051 return res;
1052}
1053
1054
c4307285
YH
1055/*
1056 * tipc_link_send_sections_fast: Entry for messages where the
b97bf3fd 1057 * destination processor is known and the header is complete,
c4307285 1058 * except for total message length.
b97bf3fd
PL
1059 * Returns user data length or errno.
1060 */
23dd4cce 1061int tipc_link_send_sections_fast(struct tipc_port *sender,
4323add6 1062 struct iovec const *msg_sect,
c4307285 1063 const u32 num_sect,
26896904 1064 unsigned int total_len,
4323add6 1065 u32 destaddr)
b97bf3fd 1066{
23dd4cce 1067 struct tipc_msg *hdr = &sender->phdr;
b97bf3fd
PL
1068 struct link *l_ptr;
1069 struct sk_buff *buf;
6c00055a 1070 struct tipc_node *node;
b97bf3fd
PL
1071 int res;
1072 u32 selector = msg_origport(hdr) & 1;
1073
b97bf3fd
PL
1074again:
1075 /*
1076 * Try building message using port's max_pkt hint.
1077 * (Must not hold any locks while building message.)
1078 */
1079
26896904
AS
1080 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1081 sender->max_pkt, !sender->user_port, &buf);
b97bf3fd 1082
4323add6 1083 read_lock_bh(&tipc_net_lock);
51a8e4de 1084 node = tipc_node_find(destaddr);
b97bf3fd 1085 if (likely(node)) {
4323add6 1086 tipc_node_lock(node);
b97bf3fd
PL
1087 l_ptr = node->active_links[selector];
1088 if (likely(l_ptr)) {
1089 if (likely(buf)) {
1090 res = link_send_buf_fast(l_ptr, buf,
23dd4cce 1091 &sender->max_pkt);
b97bf3fd 1092exit:
4323add6
PL
1093 tipc_node_unlock(node);
1094 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
1095 return res;
1096 }
1097
1098 /* Exit if build request was invalid */
1099
1100 if (unlikely(res < 0))
1101 goto exit;
1102
1103 /* Exit if link (or bearer) is congested */
1104
c4307285 1105 if (link_congested(l_ptr) ||
b97bf3fd
PL
1106 !list_empty(&l_ptr->b_ptr->cong_links)) {
1107 res = link_schedule_port(l_ptr,
23dd4cce 1108 sender->ref, res);
b97bf3fd
PL
1109 goto exit;
1110 }
1111
c4307285 1112 /*
b97bf3fd
PL
1113 * Message size exceeds max_pkt hint; update hint,
1114 * then re-try fast path or fragment the message
1115 */
1116
23dd4cce 1117 sender->max_pkt = l_ptr->max_pkt;
4323add6
PL
1118 tipc_node_unlock(node);
1119 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
1120
1121
23dd4cce 1122 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
b97bf3fd
PL
1123 goto again;
1124
1125 return link_send_sections_long(sender, msg_sect,
26896904
AS
1126 num_sect, total_len,
1127 destaddr);
b97bf3fd 1128 }
4323add6 1129 tipc_node_unlock(node);
b97bf3fd 1130 }
4323add6 1131 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
1132
1133 /* Couldn't find a link to the destination node */
1134
1135 if (buf)
1136 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1137 if (res >= 0)
4323add6 1138 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
26896904 1139 total_len, TIPC_ERR_NO_NODE);
b97bf3fd
PL
1140 return res;
1141}
1142
c4307285
YH
1143/*
1144 * link_send_sections_long(): Entry for long messages where the
b97bf3fd 1145 * destination node is known and the header is complete,
c4307285 1146 * inclusive total message length.
b97bf3fd
PL
1147 * Link and bearer congestion status have been checked to be ok,
1148 * and are ignored if they change.
1149 *
1150 * Note that fragments do not use the full link MTU so that they won't have
1151 * to undergo refragmentation if link changeover causes them to be sent
1152 * over another link with an additional tunnel header added as prefix.
1153 * (Refragmentation will still occur if the other link has a smaller MTU.)
1154 *
1155 * Returns user data length or errno.
1156 */
23dd4cce 1157static int link_send_sections_long(struct tipc_port *sender,
b97bf3fd
PL
1158 struct iovec const *msg_sect,
1159 u32 num_sect,
26896904 1160 unsigned int total_len,
b97bf3fd
PL
1161 u32 destaddr)
1162{
1163 struct link *l_ptr;
6c00055a 1164 struct tipc_node *node;
23dd4cce 1165 struct tipc_msg *hdr = &sender->phdr;
26896904 1166 u32 dsz = total_len;
0e65967e 1167 u32 max_pkt, fragm_sz, rest;
b97bf3fd 1168 struct tipc_msg fragm_hdr;
0e65967e
AS
1169 struct sk_buff *buf, *buf_chain, *prev;
1170 u32 fragm_crs, fragm_rest, hsz, sect_rest;
b97bf3fd
PL
1171 const unchar *sect_crs;
1172 int curr_sect;
1173 u32 fragm_no;
1174
1175again:
1176 fragm_no = 1;
23dd4cce 1177 max_pkt = sender->max_pkt - INT_H_SIZE;
b97bf3fd 1178 /* leave room for tunnel header in case of link changeover */
c4307285 1179 fragm_sz = max_pkt - INT_H_SIZE;
b97bf3fd
PL
1180 /* leave room for fragmentation header in each fragment */
1181 rest = dsz;
1182 fragm_crs = 0;
1183 fragm_rest = 0;
1184 sect_rest = 0;
1fc54d8f 1185 sect_crs = NULL;
b97bf3fd
PL
1186 curr_sect = -1;
1187
1188 /* Prepare reusable fragment header: */
1189
c68ca7b7 1190 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
75715217 1191 INT_H_SIZE, msg_destnode(hdr));
b97bf3fd
PL
1192 msg_set_size(&fragm_hdr, max_pkt);
1193 msg_set_fragm_no(&fragm_hdr, 1);
1194
1195 /* Prepare header of first fragment: */
1196
31e3c3f6 1197 buf_chain = buf = tipc_buf_acquire(max_pkt);
b97bf3fd
PL
1198 if (!buf)
1199 return -ENOMEM;
1200 buf->next = NULL;
27d7ff46 1201 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
b97bf3fd 1202 hsz = msg_hdr_sz(hdr);
27d7ff46 1203 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
b97bf3fd
PL
1204
1205 /* Chop up message: */
1206
1207 fragm_crs = INT_H_SIZE + hsz;
1208 fragm_rest = fragm_sz - hsz;
1209
1210 do { /* For all sections */
1211 u32 sz;
1212
1213 if (!sect_rest) {
1214 sect_rest = msg_sect[++curr_sect].iov_len;
1215 sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1216 }
1217
1218 if (sect_rest < fragm_rest)
1219 sz = sect_rest;
1220 else
1221 sz = fragm_rest;
1222
1223 if (likely(!sender->user_port)) {
1224 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1225error:
1226 for (; buf_chain; buf_chain = buf) {
1227 buf = buf_chain->next;
1228 buf_discard(buf_chain);
1229 }
1230 return -EFAULT;
1231 }
1232 } else
27d7ff46
ACM
1233 skb_copy_to_linear_data_offset(buf, fragm_crs,
1234 sect_crs, sz);
b97bf3fd
PL
1235 sect_crs += sz;
1236 sect_rest -= sz;
1237 fragm_crs += sz;
1238 fragm_rest -= sz;
1239 rest -= sz;
1240
1241 if (!fragm_rest && rest) {
1242
1243 /* Initiate new fragment: */
1244 if (rest <= fragm_sz) {
1245 fragm_sz = rest;
0e65967e 1246 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
b97bf3fd
PL
1247 } else {
1248 msg_set_type(&fragm_hdr, FRAGMENT);
1249 }
1250 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1251 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1252 prev = buf;
31e3c3f6 1253 buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
b97bf3fd
PL
1254 if (!buf)
1255 goto error;
1256
c4307285 1257 buf->next = NULL;
b97bf3fd 1258 prev->next = buf;
27d7ff46 1259 skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
b97bf3fd
PL
1260 fragm_crs = INT_H_SIZE;
1261 fragm_rest = fragm_sz;
b97bf3fd 1262 }
0e65967e 1263 } while (rest > 0);
b97bf3fd 1264
c4307285 1265 /*
b97bf3fd
PL
1266 * Now we have a buffer chain. Select a link and check
1267 * that packet size is still OK
1268 */
51a8e4de 1269 node = tipc_node_find(destaddr);
b97bf3fd 1270 if (likely(node)) {
4323add6 1271 tipc_node_lock(node);
23dd4cce 1272 l_ptr = node->active_links[sender->ref & 1];
b97bf3fd 1273 if (!l_ptr) {
4323add6 1274 tipc_node_unlock(node);
b97bf3fd
PL
1275 goto reject;
1276 }
15e979da 1277 if (l_ptr->max_pkt < max_pkt) {
23dd4cce 1278 sender->max_pkt = l_ptr->max_pkt;
4323add6 1279 tipc_node_unlock(node);
b97bf3fd
PL
1280 for (; buf_chain; buf_chain = buf) {
1281 buf = buf_chain->next;
1282 buf_discard(buf_chain);
1283 }
1284 goto again;
1285 }
1286 } else {
1287reject:
1288 for (; buf_chain; buf_chain = buf) {
1289 buf = buf_chain->next;
1290 buf_discard(buf_chain);
1291 }
4323add6 1292 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
26896904 1293 total_len, TIPC_ERR_NO_NODE);
b97bf3fd
PL
1294 }
1295
dc63d91e 1296 /* Append chain of fragments to send queue & send them */
b97bf3fd 1297
e0f08596 1298 l_ptr->long_msg_seq_no++;
dc63d91e
AS
1299 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1300 l_ptr->stats.sent_fragments += fragm_no;
b97bf3fd 1301 l_ptr->stats.sent_fragmented++;
4323add6
PL
1302 tipc_link_push_queue(l_ptr);
1303 tipc_node_unlock(node);
b97bf3fd
PL
1304 return dsz;
1305}
1306
c4307285 1307/*
4323add6 1308 * tipc_link_push_packet: Push one unsent packet to the media
b97bf3fd 1309 */
4323add6 1310u32 tipc_link_push_packet(struct link *l_ptr)
b97bf3fd
PL
1311{
1312 struct sk_buff *buf = l_ptr->first_out;
1313 u32 r_q_size = l_ptr->retransm_queue_size;
1314 u32 r_q_head = l_ptr->retransm_queue_head;
1315
1316 /* Step to position where retransmission failed, if any, */
1317 /* consider that buffers may have been released in meantime */
1318
1319 if (r_q_size && buf) {
c4307285 1320 u32 last = lesser(mod(r_q_head + r_q_size),
b97bf3fd
PL
1321 link_last_sent(l_ptr));
1322 u32 first = msg_seqno(buf_msg(buf));
1323
1324 while (buf && less(first, r_q_head)) {
1325 first = mod(first + 1);
1326 buf = buf->next;
1327 }
1328 l_ptr->retransm_queue_head = r_q_head = first;
1329 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1330 }
1331
1332 /* Continue retransmission now, if there is anything: */
1333
ca509101 1334 if (r_q_size && buf) {
b97bf3fd 1335 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
c4307285 1336 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
4323add6 1337 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
b97bf3fd
PL
1338 l_ptr->retransm_queue_head = mod(++r_q_head);
1339 l_ptr->retransm_queue_size = --r_q_size;
1340 l_ptr->stats.retransmitted++;
0e35fd5e 1341 return 0;
b97bf3fd
PL
1342 } else {
1343 l_ptr->stats.bearer_congs++;
b97bf3fd
PL
1344 return PUSH_FAILED;
1345 }
1346 }
1347
1348 /* Send deferred protocol message, if any: */
1349
1350 buf = l_ptr->proto_msg_queue;
1351 if (buf) {
1352 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
0e65967e 1353 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
4323add6 1354 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
b97bf3fd
PL
1355 l_ptr->unacked_window = 0;
1356 buf_discard(buf);
1fc54d8f 1357 l_ptr->proto_msg_queue = NULL;
0e35fd5e 1358 return 0;
b97bf3fd 1359 } else {
b97bf3fd
PL
1360 l_ptr->stats.bearer_congs++;
1361 return PUSH_FAILED;
1362 }
1363 }
1364
1365 /* Send one deferred data message, if send window not full: */
1366
1367 buf = l_ptr->next_out;
1368 if (buf) {
1369 struct tipc_msg *msg = buf_msg(buf);
1370 u32 next = msg_seqno(msg);
1371 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1372
1373 if (mod(next - first) < l_ptr->queue_limit[0]) {
1374 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
c4307285 1375 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
4323add6 1376 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
b97bf3fd
PL
1377 if (msg_user(msg) == MSG_BUNDLER)
1378 msg_set_type(msg, CLOSED_MSG);
b97bf3fd 1379 l_ptr->next_out = buf->next;
0e35fd5e 1380 return 0;
b97bf3fd 1381 } else {
b97bf3fd
PL
1382 l_ptr->stats.bearer_congs++;
1383 return PUSH_FAILED;
1384 }
1385 }
1386 }
1387 return PUSH_FINISHED;
1388}
1389
1390/*
1391 * push_queue(): push out the unsent messages of a link where
1392 * congestion has abated. Node is locked
1393 */
4323add6 1394void tipc_link_push_queue(struct link *l_ptr)
b97bf3fd
PL
1395{
1396 u32 res;
1397
4323add6 1398 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
b97bf3fd
PL
1399 return;
1400
1401 do {
4323add6 1402 res = tipc_link_push_packet(l_ptr);
0e35fd5e
AS
1403 } while (!res);
1404
b97bf3fd 1405 if (res == PUSH_FAILED)
4323add6 1406 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
1407}
1408
d356eeba
AS
1409static void link_reset_all(unsigned long addr)
1410{
6c00055a 1411 struct tipc_node *n_ptr;
d356eeba
AS
1412 char addr_string[16];
1413 u32 i;
1414
1415 read_lock_bh(&tipc_net_lock);
1416 n_ptr = tipc_node_find((u32)addr);
1417 if (!n_ptr) {
1418 read_unlock_bh(&tipc_net_lock);
1419 return; /* node no longer exists */
1420 }
1421
1422 tipc_node_lock(n_ptr);
1423
c4307285 1424 warn("Resetting all links to %s\n",
c68ca7b7 1425 tipc_addr_string_fill(addr_string, n_ptr->addr));
d356eeba
AS
1426
1427 for (i = 0; i < MAX_BEARERS; i++) {
1428 if (n_ptr->links[i]) {
8d64a5ba 1429 link_print(n_ptr->links[i], "Resetting link\n");
d356eeba
AS
1430 tipc_link_reset(n_ptr->links[i]);
1431 }
1432 }
1433
1434 tipc_node_unlock(n_ptr);
1435 read_unlock_bh(&tipc_net_lock);
1436}
1437
1438static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1439{
1440 struct tipc_msg *msg = buf_msg(buf);
1441
1442 warn("Retransmission failure on link <%s>\n", l_ptr->name);
d356eeba
AS
1443
1444 if (l_ptr->addr) {
1445
1446 /* Handle failure on standard link */
1447
8d64a5ba 1448 link_print(l_ptr, "Resetting link\n");
d356eeba
AS
1449 tipc_link_reset(l_ptr);
1450
1451 } else {
1452
1453 /* Handle failure on broadcast link */
1454
6c00055a 1455 struct tipc_node *n_ptr;
d356eeba
AS
1456 char addr_string[16];
1457
8d64a5ba
AS
1458 info("Msg seq number: %u, ", msg_seqno(msg));
1459 info("Outstanding acks: %lu\n",
1460 (unsigned long) TIPC_SKB_CB(buf)->handle);
617dbeaa 1461
01d83edd 1462 n_ptr = tipc_bclink_retransmit_to();
d356eeba
AS
1463 tipc_node_lock(n_ptr);
1464
c68ca7b7 1465 tipc_addr_string_fill(addr_string, n_ptr->addr);
8d64a5ba
AS
1466 info("Multicast link info for %s\n", addr_string);
1467 info("Supported: %d, ", n_ptr->bclink.supported);
1468 info("Acked: %u\n", n_ptr->bclink.acked);
1469 info("Last in: %u, ", n_ptr->bclink.last_in);
1470 info("Gap after: %u, ", n_ptr->bclink.gap_after);
1471 info("Gap to: %u\n", n_ptr->bclink.gap_to);
1472 info("Nack sync: %u\n\n", n_ptr->bclink.nack_sync);
d356eeba
AS
1473
1474 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1475
1476 tipc_node_unlock(n_ptr);
1477
1478 l_ptr->stale_count = 0;
1479 }
1480}
1481
c4307285 1482void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
4323add6 1483 u32 retransmits)
b97bf3fd
PL
1484{
1485 struct tipc_msg *msg;
1486
d356eeba
AS
1487 if (!buf)
1488 return;
1489
1490 msg = buf_msg(buf);
c4307285 1491
d356eeba 1492 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
ca509101 1493 if (l_ptr->retransm_queue_size == 0) {
d356eeba
AS
1494 l_ptr->retransm_queue_head = msg_seqno(msg);
1495 l_ptr->retransm_queue_size = retransmits;
d356eeba 1496 } else {
ca509101
NH
1497 err("Unexpected retransmit on link %s (qsize=%d)\n",
1498 l_ptr->name, l_ptr->retransm_queue_size);
d356eeba 1499 }
ca509101 1500 return;
d356eeba
AS
1501 } else {
1502 /* Detect repeated retransmit failures on uncongested bearer */
1503
1504 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1505 if (++l_ptr->stale_count > 100) {
1506 link_retransmit_failure(l_ptr, buf);
1507 return;
1508 }
1509 } else {
1510 l_ptr->last_retransmitted = msg_seqno(msg);
1511 l_ptr->stale_count = 1;
1512 }
b97bf3fd 1513 }
d356eeba 1514
ca509101 1515 while (retransmits && (buf != l_ptr->next_out) && buf) {
b97bf3fd
PL
1516 msg = buf_msg(buf);
1517 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
c4307285 1518 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
4323add6 1519 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
b97bf3fd
PL
1520 buf = buf->next;
1521 retransmits--;
1522 l_ptr->stats.retransmitted++;
1523 } else {
4323add6 1524 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
1525 l_ptr->stats.bearer_congs++;
1526 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1527 l_ptr->retransm_queue_size = retransmits;
1528 return;
1529 }
1530 }
d356eeba 1531
b97bf3fd
PL
1532 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1533}
1534
c4307285 1535/**
b97bf3fd
PL
1536 * link_insert_deferred_queue - insert deferred messages back into receive chain
1537 */
1538
c4307285 1539static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
b97bf3fd
PL
1540 struct sk_buff *buf)
1541{
1542 u32 seq_no;
1543
1544 if (l_ptr->oldest_deferred_in == NULL)
1545 return buf;
1546
1547 seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1548 if (seq_no == mod(l_ptr->next_in_no)) {
1549 l_ptr->newest_deferred_in->next = buf;
1550 buf = l_ptr->oldest_deferred_in;
1551 l_ptr->oldest_deferred_in = NULL;
1552 l_ptr->deferred_inqueue_sz = 0;
1553 }
1554 return buf;
1555}
1556
85035568
AS
1557/**
1558 * link_recv_buf_validate - validate basic format of received message
1559 *
1560 * This routine ensures a TIPC message has an acceptable header, and at least
1561 * as much data as the header indicates it should. The routine also ensures
1562 * that the entire message header is stored in the main fragment of the message
1563 * buffer, to simplify future access to message header fields.
1564 *
1565 * Note: Having extra info present in the message header or data areas is OK.
1566 * TIPC will ignore the excess, under the assumption that it is optional info
1567 * introduced by a later release of the protocol.
1568 */
1569
1570static int link_recv_buf_validate(struct sk_buff *buf)
1571{
1572 static u32 min_data_hdr_size[8] = {
741d9eb7 1573 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
85035568
AS
1574 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1575 };
1576
1577 struct tipc_msg *msg;
1578 u32 tipc_hdr[2];
1579 u32 size;
1580 u32 hdr_size;
1581 u32 min_hdr_size;
1582
1583 if (unlikely(buf->len < MIN_H_SIZE))
1584 return 0;
1585
1586 msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1587 if (msg == NULL)
1588 return 0;
1589
1590 if (unlikely(msg_version(msg) != TIPC_VERSION))
1591 return 0;
1592
1593 size = msg_size(msg);
1594 hdr_size = msg_hdr_sz(msg);
1595 min_hdr_size = msg_isdata(msg) ?
1596 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1597
1598 if (unlikely((hdr_size < min_hdr_size) ||
1599 (size < hdr_size) ||
1600 (buf->len < size) ||
1601 (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1602 return 0;
1603
1604 return pskb_may_pull(buf, hdr_size);
1605}
1606
b02b69c8
AS
1607/**
1608 * tipc_recv_msg - process TIPC messages arriving from off-node
1609 * @head: pointer to message buffer chain
1610 * @tb_ptr: pointer to bearer message arrived on
1611 *
1612 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1613 * structure (i.e. cannot be NULL), but bearer can be inactive.
1614 */
1615
2d627b92 1616void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
b97bf3fd 1617{
4323add6 1618 read_lock_bh(&tipc_net_lock);
b97bf3fd 1619 while (head) {
6c00055a 1620 struct tipc_node *n_ptr;
b97bf3fd
PL
1621 struct link *l_ptr;
1622 struct sk_buff *crs;
1623 struct sk_buff *buf = head;
85035568
AS
1624 struct tipc_msg *msg;
1625 u32 seq_no;
1626 u32 ackd;
b97bf3fd
PL
1627 u32 released = 0;
1628 int type;
1629
b97bf3fd 1630 head = head->next;
85035568 1631
b02b69c8
AS
1632 /* Ensure bearer is still enabled */
1633
1634 if (unlikely(!b_ptr->active))
1635 goto cont;
1636
85035568
AS
1637 /* Ensure message is well-formed */
1638
1639 if (unlikely(!link_recv_buf_validate(buf)))
b97bf3fd 1640 goto cont;
b97bf3fd 1641
fe13dda2
AS
1642 /* Ensure message data is a single contiguous unit */
1643
a016892c 1644 if (unlikely(buf_linearize(buf)))
fe13dda2 1645 goto cont;
fe13dda2 1646
85035568
AS
1647 /* Handle arrival of a non-unicast link message */
1648
1649 msg = buf_msg(buf);
1650
b97bf3fd 1651 if (unlikely(msg_non_seq(msg))) {
1265a021
AS
1652 if (msg_user(msg) == LINK_CONFIG)
1653 tipc_disc_recv_msg(buf, b_ptr);
1654 else
1655 tipc_bclink_recv_pkt(buf);
b97bf3fd
PL
1656 continue;
1657 }
c4307285 1658
ed33a9c4
AS
1659 /* Discard unicast link messages destined for another node */
1660
26008247
AS
1661 if (unlikely(!msg_short(msg) &&
1662 (msg_destnode(msg) != tipc_own_addr)))
1663 goto cont;
c4307285 1664
5a68d5ee 1665 /* Locate neighboring node that sent message */
85035568 1666
4323add6 1667 n_ptr = tipc_node_find(msg_prevnode(msg));
b97bf3fd
PL
1668 if (unlikely(!n_ptr))
1669 goto cont;
4323add6 1670 tipc_node_lock(n_ptr);
85035568 1671
b4b56102 1672 /* Locate unicast link endpoint that should handle message */
5a68d5ee 1673
b4b56102
AS
1674 l_ptr = n_ptr->links[b_ptr->identity];
1675 if (unlikely(!l_ptr)) {
5a68d5ee
AS
1676 tipc_node_unlock(n_ptr);
1677 goto cont;
1678 }
1679
b4b56102 1680 /* Verify that communication with node is currently allowed */
5a68d5ee 1681
b4b56102
AS
1682 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1683 msg_user(msg) == LINK_PROTOCOL &&
1684 (msg_type(msg) == RESET_MSG ||
1685 msg_type(msg) == ACTIVATE_MSG) &&
1686 !msg_redundant_link(msg))
1687 n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1688
1689 if (n_ptr->block_setup) {
4323add6 1690 tipc_node_unlock(n_ptr);
b97bf3fd
PL
1691 goto cont;
1692 }
85035568
AS
1693
1694 /* Validate message sequence number info */
1695
1696 seq_no = msg_seqno(msg);
1697 ackd = msg_ack(msg);
1698
1699 /* Release acked messages */
1700
b97bf3fd 1701 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
4323add6
PL
1702 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1703 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
b97bf3fd
PL
1704 }
1705
1706 crs = l_ptr->first_out;
c4307285 1707 while ((crs != l_ptr->next_out) &&
b97bf3fd
PL
1708 less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1709 struct sk_buff *next = crs->next;
1710
1711 buf_discard(crs);
1712 crs = next;
1713 released++;
1714 }
1715 if (released) {
1716 l_ptr->first_out = crs;
1717 l_ptr->out_queue_size -= released;
1718 }
85035568
AS
1719
1720 /* Try sending any messages link endpoint has pending */
1721
b97bf3fd 1722 if (unlikely(l_ptr->next_out))
4323add6 1723 tipc_link_push_queue(l_ptr);
b97bf3fd 1724 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
4323add6 1725 tipc_link_wakeup_ports(l_ptr, 0);
b97bf3fd
PL
1726 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1727 l_ptr->stats.sent_acks++;
4323add6 1728 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
b97bf3fd
PL
1729 }
1730
85035568
AS
1731 /* Now (finally!) process the incoming message */
1732
b97bf3fd
PL
1733protocol_check:
1734 if (likely(link_working_working(l_ptr))) {
1735 if (likely(seq_no == mod(l_ptr->next_in_no))) {
1736 l_ptr->next_in_no++;
1737 if (unlikely(l_ptr->oldest_deferred_in))
1738 head = link_insert_deferred_queue(l_ptr,
1739 head);
1740 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1741deliver:
1742 if (likely(msg_isdata(msg))) {
4323add6
PL
1743 tipc_node_unlock(n_ptr);
1744 tipc_port_recv_msg(buf);
b97bf3fd
PL
1745 continue;
1746 }
1747 switch (msg_user(msg)) {
1748 case MSG_BUNDLER:
1749 l_ptr->stats.recv_bundles++;
c4307285 1750 l_ptr->stats.recv_bundled +=
b97bf3fd 1751 msg_msgcnt(msg);
4323add6
PL
1752 tipc_node_unlock(n_ptr);
1753 tipc_link_recv_bundle(buf);
b97bf3fd 1754 continue;
b97bf3fd 1755 case NAME_DISTRIBUTOR:
4323add6
PL
1756 tipc_node_unlock(n_ptr);
1757 tipc_named_recv(buf);
b97bf3fd
PL
1758 continue;
1759 case CONN_MANAGER:
4323add6
PL
1760 tipc_node_unlock(n_ptr);
1761 tipc_port_recv_proto_msg(buf);
b97bf3fd
PL
1762 continue;
1763 case MSG_FRAGMENTER:
1764 l_ptr->stats.recv_fragments++;
c4307285 1765 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
4323add6 1766 &buf, &msg)) {
b97bf3fd
PL
1767 l_ptr->stats.recv_fragmented++;
1768 goto deliver;
1769 }
1770 break;
1771 case CHANGEOVER_PROTOCOL:
1772 type = msg_type(msg);
4323add6 1773 if (link_recv_changeover_msg(&l_ptr, &buf)) {
b97bf3fd
PL
1774 msg = buf_msg(buf);
1775 seq_no = msg_seqno(msg);
b97bf3fd
PL
1776 if (type == ORIGINAL_MSG)
1777 goto deliver;
1778 goto protocol_check;
1779 }
1780 break;
7945c1fb
AS
1781 default:
1782 buf_discard(buf);
1783 buf = NULL;
1784 break;
b97bf3fd
PL
1785 }
1786 }
4323add6
PL
1787 tipc_node_unlock(n_ptr);
1788 tipc_net_route_msg(buf);
b97bf3fd
PL
1789 continue;
1790 }
1791 link_handle_out_of_seq_msg(l_ptr, buf);
1792 head = link_insert_deferred_queue(l_ptr, head);
4323add6 1793 tipc_node_unlock(n_ptr);
b97bf3fd
PL
1794 continue;
1795 }
1796
1797 if (msg_user(msg) == LINK_PROTOCOL) {
1798 link_recv_proto_msg(l_ptr, buf);
1799 head = link_insert_deferred_queue(l_ptr, head);
4323add6 1800 tipc_node_unlock(n_ptr);
b97bf3fd
PL
1801 continue;
1802 }
b97bf3fd
PL
1803 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1804
1805 if (link_working_working(l_ptr)) {
1806 /* Re-insert in front of queue */
b97bf3fd
PL
1807 buf->next = head;
1808 head = buf;
4323add6 1809 tipc_node_unlock(n_ptr);
b97bf3fd
PL
1810 continue;
1811 }
4323add6 1812 tipc_node_unlock(n_ptr);
b97bf3fd
PL
1813cont:
1814 buf_discard(buf);
1815 }
4323add6 1816 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
1817}
1818
c4307285
YH
1819/*
1820 * link_defer_buf(): Sort a received out-of-sequence packet
b97bf3fd
PL
1821 * into the deferred reception queue.
1822 * Returns the increase of the queue length,i.e. 0 or 1
1823 */
1824
4323add6
PL
1825u32 tipc_link_defer_pkt(struct sk_buff **head,
1826 struct sk_buff **tail,
1827 struct sk_buff *buf)
b97bf3fd 1828{
1fc54d8f 1829 struct sk_buff *prev = NULL;
b97bf3fd
PL
1830 struct sk_buff *crs = *head;
1831 u32 seq_no = msg_seqno(buf_msg(buf));
1832
1833 buf->next = NULL;
1834
1835 /* Empty queue ? */
1836 if (*head == NULL) {
1837 *head = *tail = buf;
1838 return 1;
1839 }
1840
1841 /* Last ? */
1842 if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1843 (*tail)->next = buf;
1844 *tail = buf;
1845 return 1;
1846 }
1847
1848 /* Scan through queue and sort it in */
1849 do {
1850 struct tipc_msg *msg = buf_msg(crs);
1851
1852 if (less(seq_no, msg_seqno(msg))) {
1853 buf->next = crs;
1854 if (prev)
1855 prev->next = buf;
1856 else
c4307285 1857 *head = buf;
b97bf3fd
PL
1858 return 1;
1859 }
a016892c 1860 if (seq_no == msg_seqno(msg))
b97bf3fd 1861 break;
b97bf3fd
PL
1862 prev = crs;
1863 crs = crs->next;
0e65967e 1864 } while (crs);
b97bf3fd
PL
1865
1866 /* Message is a duplicate of an existing message */
1867
1868 buf_discard(buf);
1869 return 0;
1870}
1871
c4307285 1872/**
b97bf3fd
PL
1873 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1874 */
1875
c4307285 1876static void link_handle_out_of_seq_msg(struct link *l_ptr,
b97bf3fd
PL
1877 struct sk_buff *buf)
1878{
1879 u32 seq_no = msg_seqno(buf_msg(buf));
1880
1881 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1882 link_recv_proto_msg(l_ptr, buf);
1883 return;
1884 }
1885
b97bf3fd
PL
1886 /* Record OOS packet arrival (force mismatch on next timeout) */
1887
1888 l_ptr->checkpoint--;
1889
c4307285 1890 /*
b97bf3fd
PL
1891 * Discard packet if a duplicate; otherwise add it to deferred queue
1892 * and notify peer of gap as per protocol specification
1893 */
1894
1895 if (less(seq_no, mod(l_ptr->next_in_no))) {
1896 l_ptr->stats.duplicates++;
1897 buf_discard(buf);
1898 return;
1899 }
1900
4323add6
PL
1901 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1902 &l_ptr->newest_deferred_in, buf)) {
b97bf3fd
PL
1903 l_ptr->deferred_inqueue_sz++;
1904 l_ptr->stats.deferred_recv++;
1905 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
4323add6 1906 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
b97bf3fd
PL
1907 } else
1908 l_ptr->stats.duplicates++;
1909}
1910
1911/*
1912 * Send protocol message to the other endpoint.
1913 */
4323add6
PL
1914void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1915 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
b97bf3fd 1916{
1fc54d8f 1917 struct sk_buff *buf = NULL;
b97bf3fd 1918 struct tipc_msg *msg = l_ptr->pmsg;
c4307285 1919 u32 msg_size = sizeof(l_ptr->proto_msg);
75f0aa49 1920 int r_flag;
b97bf3fd
PL
1921
1922 if (link_blocked(l_ptr))
1923 return;
b4b56102
AS
1924
1925 /* Abort non-RESET send if communication with node is prohibited */
1926
1927 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1928 return;
1929
b97bf3fd
PL
1930 msg_set_type(msg, msg_typ);
1931 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
c4307285 1932 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
4323add6 1933 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
b97bf3fd
PL
1934
1935 if (msg_typ == STATE_MSG) {
1936 u32 next_sent = mod(l_ptr->next_out_no);
1937
4323add6 1938 if (!tipc_link_is_up(l_ptr))
b97bf3fd
PL
1939 return;
1940 if (l_ptr->next_out)
1941 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1942 msg_set_next_sent(msg, next_sent);
1943 if (l_ptr->oldest_deferred_in) {
1944 u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1945 gap = mod(rec - mod(l_ptr->next_in_no));
1946 }
1947 msg_set_seq_gap(msg, gap);
1948 if (gap)
1949 l_ptr->stats.sent_nacks++;
1950 msg_set_link_tolerance(msg, tolerance);
1951 msg_set_linkprio(msg, priority);
1952 msg_set_max_pkt(msg, ack_mtu);
1953 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1954 msg_set_probe(msg, probe_msg != 0);
c4307285 1955 if (probe_msg) {
b97bf3fd
PL
1956 u32 mtu = l_ptr->max_pkt;
1957
c4307285 1958 if ((mtu < l_ptr->max_pkt_target) &&
b97bf3fd
PL
1959 link_working_working(l_ptr) &&
1960 l_ptr->fsm_msg_cnt) {
1961 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
c4307285
YH
1962 if (l_ptr->max_pkt_probes == 10) {
1963 l_ptr->max_pkt_target = (msg_size - 4);
1964 l_ptr->max_pkt_probes = 0;
b97bf3fd 1965 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
c4307285 1966 }
b97bf3fd 1967 l_ptr->max_pkt_probes++;
c4307285 1968 }
b97bf3fd
PL
1969
1970 l_ptr->stats.sent_probes++;
c4307285 1971 }
b97bf3fd
PL
1972 l_ptr->stats.sent_states++;
1973 } else { /* RESET_MSG or ACTIVATE_MSG */
1974 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1975 msg_set_seq_gap(msg, 0);
1976 msg_set_next_sent(msg, 1);
f23d9bf2 1977 msg_set_probe(msg, 0);
b97bf3fd
PL
1978 msg_set_link_tolerance(msg, l_ptr->tolerance);
1979 msg_set_linkprio(msg, l_ptr->priority);
1980 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1981 }
1982
75f0aa49
AS
1983 r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1984 msg_set_redundant_link(msg, r_flag);
b97bf3fd
PL
1985 msg_set_linkprio(msg, l_ptr->priority);
1986
1987 /* Ensure sequence number will not fit : */
1988
1989 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1990
1991 /* Congestion? */
1992
4323add6 1993 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
b97bf3fd
PL
1994 if (!l_ptr->proto_msg_queue) {
1995 l_ptr->proto_msg_queue =
31e3c3f6 1996 tipc_buf_acquire(sizeof(l_ptr->proto_msg));
b97bf3fd
PL
1997 }
1998 buf = l_ptr->proto_msg_queue;
1999 if (!buf)
2000 return;
27d7ff46 2001 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
b97bf3fd
PL
2002 return;
2003 }
b97bf3fd
PL
2004
2005 /* Message can be sent */
2006
31e3c3f6 2007 buf = tipc_buf_acquire(msg_size);
b97bf3fd
PL
2008 if (!buf)
2009 return;
2010
27d7ff46 2011 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
c4307285 2012 msg_set_size(buf_msg(buf), msg_size);
b97bf3fd 2013
4323add6 2014 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
b97bf3fd
PL
2015 l_ptr->unacked_window = 0;
2016 buf_discard(buf);
2017 return;
2018 }
2019
2020 /* New congestion */
4323add6 2021 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
b97bf3fd
PL
2022 l_ptr->proto_msg_queue = buf;
2023 l_ptr->stats.bearer_congs++;
2024}
2025
2026/*
2027 * Receive protocol message :
c4307285
YH
2028 * Note that network plane id propagates through the network, and may
2029 * change at any time. The node with lowest address rules
b97bf3fd
PL
2030 */
2031
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Blocked links ignore all incoming protocol traffic */
	if (link_blocked(l_ptr))
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */

	l_ptr->checkpoint--;

	/* Lowest-addressed node's plane id wins when the two ends disagree */
	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */

		/* Peer supplies its local interface name for our link name */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		/* Adopt the larger of the two tolerance values */
		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Negotiate max packet size downward, never upward */
		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}
		l_ptr->owner->bclink.supported = (max_pkt_info != 0);

		link_state_event(l_ptr, msg_type(msg));

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		/* Synchronize broadcast sequence numbers */
		if (!tipc_node_redundant_links(l_ptr->owner))
			l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		/* A priority change requires a reset so both ends re-sync */
		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			warn("Resetting link <%s>, priority change %u->%u\n",
			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Gap between what peer has sent and what we have received */
		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		/* Probe reply acknowledges the probed packet size */
		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */

		tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	buf_discard(buf);
}
2159
2160
2161/*
c4307285 2162 * tipc_link_tunnel(): Send one message via a link belonging to
b97bf3fd
PL
2163 * another bearer. Owner node is locked.
2164 */
31e3c3f6 2165static void tipc_link_tunnel(struct link *l_ptr,
2166 struct tipc_msg *tunnel_hdr,
2167 struct tipc_msg *msg,
2168 u32 selector)
b97bf3fd
PL
2169{
2170 struct link *tunnel;
2171 struct sk_buff *buf;
2172 u32 length = msg_size(msg);
2173
2174 tunnel = l_ptr->owner->active_links[selector & 1];
5392d646
AS
2175 if (!tipc_link_is_up(tunnel)) {
2176 warn("Link changeover error, "
2177 "tunnel link no longer available\n");
b97bf3fd 2178 return;
5392d646 2179 }
b97bf3fd 2180 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
31e3c3f6 2181 buf = tipc_buf_acquire(length + INT_H_SIZE);
5392d646
AS
2182 if (!buf) {
2183 warn("Link changeover error, "
2184 "unable to send tunnel msg\n");
b97bf3fd 2185 return;
5392d646 2186 }
27d7ff46
ACM
2187 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2188 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
4323add6 2189 tipc_link_send_buf(tunnel, buf);
b97bf3fd
PL
2190}
2191
2192
2193
2194/*
2195 * changeover(): Send whole message queue via the remaining link
2196 * Owner node is locked.
2197 */
2198
4323add6 2199void tipc_link_changeover(struct link *l_ptr)
b97bf3fd
PL
2200{
2201 u32 msgcount = l_ptr->out_queue_size;
2202 struct sk_buff *crs = l_ptr->first_out;
2203 struct link *tunnel = l_ptr->owner->active_links[0];
b97bf3fd 2204 struct tipc_msg tunnel_hdr;
5392d646 2205 int split_bundles;
b97bf3fd
PL
2206
2207 if (!tunnel)
2208 return;
2209
5392d646
AS
2210 if (!l_ptr->owner->permit_changeover) {
2211 warn("Link changeover error, "
2212 "peer did not permit changeover\n");
b97bf3fd 2213 return;
5392d646 2214 }
b97bf3fd 2215
c68ca7b7 2216 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
75715217 2217 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
b97bf3fd
PL
2218 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2219 msg_set_msgcnt(&tunnel_hdr, msgcount);
f131072c 2220
b97bf3fd
PL
2221 if (!l_ptr->first_out) {
2222 struct sk_buff *buf;
2223
31e3c3f6 2224 buf = tipc_buf_acquire(INT_H_SIZE);
b97bf3fd 2225 if (buf) {
27d7ff46 2226 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
b97bf3fd 2227 msg_set_size(&tunnel_hdr, INT_H_SIZE);
4323add6 2228 tipc_link_send_buf(tunnel, buf);
b97bf3fd 2229 } else {
a10bd924
AS
2230 warn("Link changeover error, "
2231 "unable to send changeover msg\n");
b97bf3fd
PL
2232 }
2233 return;
2234 }
f131072c 2235
c4307285 2236 split_bundles = (l_ptr->owner->active_links[0] !=
5392d646
AS
2237 l_ptr->owner->active_links[1]);
2238
b97bf3fd
PL
2239 while (crs) {
2240 struct tipc_msg *msg = buf_msg(crs);
2241
2242 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
b97bf3fd 2243 struct tipc_msg *m = msg_get_wrapped(msg);
0e65967e 2244 unchar *pos = (unchar *)m;
b97bf3fd 2245
d788d805 2246 msgcount = msg_msgcnt(msg);
b97bf3fd 2247 while (msgcount--) {
0e65967e 2248 msg_set_seqno(m, msg_seqno(msg));
4323add6
PL
2249 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2250 msg_link_selector(m));
b97bf3fd
PL
2251 pos += align(msg_size(m));
2252 m = (struct tipc_msg *)pos;
2253 }
2254 } else {
4323add6
PL
2255 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2256 msg_link_selector(msg));
b97bf3fd
PL
2257 }
2258 crs = crs->next;
2259 }
2260}
2261
4323add6 2262void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
b97bf3fd
PL
2263{
2264 struct sk_buff *iter;
2265 struct tipc_msg tunnel_hdr;
2266
c68ca7b7 2267 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
75715217 2268 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
b97bf3fd
PL
2269 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2270 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2271 iter = l_ptr->first_out;
2272 while (iter) {
2273 struct sk_buff *outbuf;
2274 struct tipc_msg *msg = buf_msg(iter);
2275 u32 length = msg_size(msg);
2276
2277 if (msg_user(msg) == MSG_BUNDLER)
2278 msg_set_type(msg, CLOSED_MSG);
2279 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
c4307285 2280 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
b97bf3fd 2281 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
31e3c3f6 2282 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
b97bf3fd 2283 if (outbuf == NULL) {
a10bd924
AS
2284 warn("Link changeover error, "
2285 "unable to send duplicate msg\n");
b97bf3fd
PL
2286 return;
2287 }
27d7ff46
ACM
2288 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2289 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2290 length);
4323add6
PL
2291 tipc_link_send_buf(tunnel, outbuf);
2292 if (!tipc_link_is_up(l_ptr))
b97bf3fd
PL
2293 return;
2294 iter = iter->next;
2295 }
2296}
2297
2298
2299
2300/**
2301 * buf_extract - extracts embedded TIPC message from another message
2302 * @skb: encapsulating message buffer
2303 * @from_pos: offset to extract from
2304 *
c4307285 2305 * Returns a new message buffer containing an embedded message. The
b97bf3fd
PL
2306 * encapsulating message itself is left unchanged.
2307 */
2308
2309static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2310{
2311 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2312 u32 size = msg_size(msg);
2313 struct sk_buff *eb;
2314
31e3c3f6 2315 eb = tipc_buf_acquire(size);
b97bf3fd 2316 if (eb)
27d7ff46 2317 skb_copy_to_linear_data(eb, msg, size);
b97bf3fd
PL
2318 return eb;
2319}
2320
c4307285 2321/*
b97bf3fd
PL
2322 * link_recv_changeover_msg(): Receive tunneled packet sent
2323 * via other link. Node is locked. Return extracted buffer.
2324 */
2325
static int link_recv_changeover_msg(struct link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);

	/* Redirect to the link the tunneled traffic was originally sent on */
	dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		err("Unexpected changeover message on link <%s>\n",
		    (*l_ptr)->name);
		goto exit;
	}
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		/* Drop duplicates already received on the original link */
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			warn("Link changeover error, duplicate msg dropped\n");
			goto exit;
		}
		buf_discard(tunnel_buf);
		return 1;
	}

	/* First original message ?: */

	if (tipc_link_is_up(dest_link)) {
		info("Resetting link <%s>, changeover initiated by peer\n",
		     dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */

	if (dest_link->exp_msg_count == 0) {
		warn("Link switchover error, "
		     "got too many tunnelled messages\n");
		goto exit;
	}
	dest_link->exp_msg_count--;
	/* Messages from before the reset checkpoint are stale: drop them */
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			buf_discard(tunnel_buf);
			return 1;
		} else {
			warn("Link changeover error, original msg dropped\n");
		}
	}
exit:
	*buf = NULL;
	buf_discard(tunnel_buf);
	return 0;
}
2398
2399/*
2400 * Bundler functionality:
2401 */
4323add6 2402void tipc_link_recv_bundle(struct sk_buff *buf)
b97bf3fd
PL
2403{
2404 u32 msgcount = msg_msgcnt(buf_msg(buf));
2405 u32 pos = INT_H_SIZE;
2406 struct sk_buff *obuf;
2407
b97bf3fd
PL
2408 while (msgcount--) {
2409 obuf = buf_extract(buf, pos);
2410 if (obuf == NULL) {
a10bd924
AS
2411 warn("Link unable to unbundle message(s)\n");
2412 break;
3ff50b79 2413 }
b97bf3fd 2414 pos += align(msg_size(buf_msg(obuf)));
4323add6 2415 tipc_net_route_msg(obuf);
b97bf3fd
PL
2416 }
2417 buf_discard(buf);
2418}
2419
2420/*
2421 * Fragmentation/defragmentation:
2422 */
2423
2424
c4307285 2425/*
31e3c3f6 2426 * link_send_long_buf: Entry for buffers needing fragmentation.
c4307285 2427 * The buffer is complete, inclusive total message length.
b97bf3fd
PL
2428 * Returns user data length.
2429 */
31e3c3f6 2430static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
b97bf3fd 2431{
77561557
AS
2432 struct sk_buff *buf_chain = NULL;
2433 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
b97bf3fd
PL
2434 struct tipc_msg *inmsg = buf_msg(buf);
2435 struct tipc_msg fragm_hdr;
2436 u32 insize = msg_size(inmsg);
2437 u32 dsz = msg_data_sz(inmsg);
2438 unchar *crs = buf->data;
2439 u32 rest = insize;
15e979da 2440 u32 pack_sz = l_ptr->max_pkt;
b97bf3fd 2441 u32 fragm_sz = pack_sz - INT_H_SIZE;
77561557 2442 u32 fragm_no = 0;
9c396a7b 2443 u32 destaddr;
b97bf3fd
PL
2444
2445 if (msg_short(inmsg))
2446 destaddr = l_ptr->addr;
9c396a7b
AS
2447 else
2448 destaddr = msg_destnode(inmsg);
b97bf3fd 2449
b97bf3fd
PL
2450 /* Prepare reusable fragment header: */
2451
c68ca7b7 2452 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
75715217 2453 INT_H_SIZE, destaddr);
b97bf3fd
PL
2454
2455 /* Chop up message: */
2456
2457 while (rest > 0) {
2458 struct sk_buff *fragm;
2459
2460 if (rest <= fragm_sz) {
2461 fragm_sz = rest;
2462 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2463 }
31e3c3f6 2464 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
b97bf3fd 2465 if (fragm == NULL) {
77561557
AS
2466 buf_discard(buf);
2467 while (buf_chain) {
2468 buf = buf_chain;
2469 buf_chain = buf_chain->next;
2470 buf_discard(buf);
2471 }
2472 return -ENOMEM;
b97bf3fd
PL
2473 }
2474 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
77561557
AS
2475 fragm_no++;
2476 msg_set_fragm_no(&fragm_hdr, fragm_no);
27d7ff46
ACM
2477 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2478 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2479 fragm_sz);
77561557
AS
2480 buf_chain_tail->next = fragm;
2481 buf_chain_tail = fragm;
b97bf3fd 2482
b97bf3fd
PL
2483 rest -= fragm_sz;
2484 crs += fragm_sz;
2485 msg_set_type(&fragm_hdr, FRAGMENT);
2486 }
b97bf3fd 2487 buf_discard(buf);
77561557
AS
2488
2489 /* Append chain of fragments to send queue & send them */
2490
2491 l_ptr->long_msg_seq_no++;
2492 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2493 l_ptr->stats.sent_fragments += fragm_no;
2494 l_ptr->stats.sent_fragmented++;
2495 tipc_link_push_queue(l_ptr);
2496
b97bf3fd
PL
2497 return dsz;
2498}
2499
c4307285
YH
2500/*
2501 * A pending message being re-assembled must store certain values
2502 * to handle subsequent fragments correctly. The following functions
b97bf3fd 2503 * help storing these values in unused, available fields in the
25985edc 2504 * pending message. This makes dynamic memory allocation unnecessary.
b97bf3fd
PL
2505 */
2506
05790c64 2507static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
b97bf3fd
PL
2508{
2509 msg_set_seqno(buf_msg(buf), seqno);
2510}
2511
05790c64 2512static u32 get_fragm_size(struct sk_buff *buf)
b97bf3fd
PL
2513{
2514 return msg_ack(buf_msg(buf));
2515}
2516
05790c64 2517static void set_fragm_size(struct sk_buff *buf, u32 sz)
b97bf3fd
PL
2518{
2519 msg_set_ack(buf_msg(buf), sz);
2520}
2521
05790c64 2522static u32 get_expected_frags(struct sk_buff *buf)
b97bf3fd
PL
2523{
2524 return msg_bcast_ack(buf_msg(buf));
2525}
2526
05790c64 2527static void set_expected_frags(struct sk_buff *buf, u32 exp)
b97bf3fd
PL
2528{
2529 msg_set_bcast_ack(buf_msg(buf), exp);
2530}
2531
05790c64 2532static u32 get_timer_cnt(struct sk_buff *buf)
b97bf3fd
PL
2533{
2534 return msg_reroute_cnt(buf_msg(buf));
2535}
2536
05790c64 2537static void incr_timer_cnt(struct sk_buff *buf)
b97bf3fd
PL
2538{
2539 msg_incr_reroute_cnt(buf_msg(buf));
2540}
2541
c4307285
YH
2542/*
2543 * tipc_link_recv_fragment(): Called with node lock on. Returns
b97bf3fd
PL
2544 * the reassembled buffer if message is complete.
2545 */
c4307285 2546int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
4323add6 2547 struct tipc_msg **m)
b97bf3fd 2548{
1fc54d8f 2549 struct sk_buff *prev = NULL;
b97bf3fd
PL
2550 struct sk_buff *fbuf = *fb;
2551 struct tipc_msg *fragm = buf_msg(fbuf);
2552 struct sk_buff *pbuf = *pending;
2553 u32 long_msg_seq_no = msg_long_msgno(fragm);
2554
1fc54d8f 2555 *fb = NULL;
b97bf3fd
PL
2556
2557 /* Is there an incomplete message waiting for this fragment? */
2558
f64f9e71
JP
2559 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no) ||
2560 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
b97bf3fd
PL
2561 prev = pbuf;
2562 pbuf = pbuf->next;
2563 }
2564
2565 if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2566 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2567 u32 msg_sz = msg_size(imsg);
2568 u32 fragm_sz = msg_data_sz(fragm);
2569 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
741d9eb7 2570 u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
b97bf3fd
PL
2571 if (msg_type(imsg) == TIPC_MCAST_MSG)
2572 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2573 if (msg_size(imsg) > max) {
b97bf3fd
PL
2574 buf_discard(fbuf);
2575 return 0;
2576 }
31e3c3f6 2577 pbuf = tipc_buf_acquire(msg_size(imsg));
b97bf3fd
PL
2578 if (pbuf != NULL) {
2579 pbuf->next = *pending;
2580 *pending = pbuf;
27d7ff46
ACM
2581 skb_copy_to_linear_data(pbuf, imsg,
2582 msg_data_sz(fragm));
b97bf3fd
PL
2583 /* Prepare buffer for subsequent fragments. */
2584
c4307285 2585 set_long_msg_seqno(pbuf, long_msg_seq_no);
0e65967e
AS
2586 set_fragm_size(pbuf, fragm_sz);
2587 set_expected_frags(pbuf, exp_fragm_cnt - 1);
b97bf3fd 2588 } else {
a10bd924 2589 warn("Link unable to reassemble fragmented message\n");
b97bf3fd
PL
2590 }
2591 buf_discard(fbuf);
2592 return 0;
2593 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2594 u32 dsz = msg_data_sz(fragm);
2595 u32 fsz = get_fragm_size(pbuf);
2596 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2597 u32 exp_frags = get_expected_frags(pbuf) - 1;
27d7ff46
ACM
2598 skb_copy_to_linear_data_offset(pbuf, crs,
2599 msg_data(fragm), dsz);
b97bf3fd
PL
2600 buf_discard(fbuf);
2601
2602 /* Is message complete? */
2603
2604 if (exp_frags == 0) {
2605 if (prev)
2606 prev->next = pbuf->next;
2607 else
2608 *pending = pbuf->next;
2609 msg_reset_reroute_cnt(buf_msg(pbuf));
2610 *fb = pbuf;
2611 *m = buf_msg(pbuf);
2612 return 1;
2613 }
0e65967e 2614 set_expected_frags(pbuf, exp_frags);
b97bf3fd
PL
2615 return 0;
2616 }
b97bf3fd
PL
2617 buf_discard(fbuf);
2618 return 0;
2619}
2620
2621/**
2622 * link_check_defragm_bufs - flush stale incoming message fragments
2623 * @l_ptr: pointer to link
2624 */
2625
2626static void link_check_defragm_bufs(struct link *l_ptr)
2627{
1fc54d8f
SR
2628 struct sk_buff *prev = NULL;
2629 struct sk_buff *next = NULL;
b97bf3fd
PL
2630 struct sk_buff *buf = l_ptr->defragm_buf;
2631
2632 if (!buf)
2633 return;
2634 if (!link_working_working(l_ptr))
2635 return;
2636 while (buf) {
2637 u32 cnt = get_timer_cnt(buf);
2638
2639 next = buf->next;
2640 if (cnt < 4) {
2641 incr_timer_cnt(buf);
2642 prev = buf;
2643 } else {
b97bf3fd
PL
2644 if (prev)
2645 prev->next = buf->next;
2646 else
2647 l_ptr->defragm_buf = buf->next;
2648 buf_discard(buf);
2649 }
2650 buf = next;
2651 }
2652}
2653
2654
2655
2656static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2657{
5413b4c6
AS
2658 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2659 return;
2660
b97bf3fd
PL
2661 l_ptr->tolerance = tolerance;
2662 l_ptr->continuity_interval =
2663 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2664 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2665}
2666
2667
4323add6 2668void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
b97bf3fd
PL
2669{
2670 /* Data messages from this node, inclusive FIRST_FRAGM */
06d82c91
AS
2671 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2672 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2673 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2674 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
b97bf3fd 2675 /* Transiting data messages,inclusive FIRST_FRAGM */
06d82c91
AS
2676 l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2677 l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2678 l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2679 l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
b97bf3fd 2680 l_ptr->queue_limit[CONN_MANAGER] = 1200;
b97bf3fd
PL
2681 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2682 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2683 /* FRAGMENT and LAST_FRAGMENT packets */
2684 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2685}
2686
2687/**
2688 * link_find_link - locate link by name
2689 * @name - ptr to link name string
2690 * @node - ptr to area to be filled with ptr to associated node
c4307285 2691 *
4323add6 2692 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
b97bf3fd 2693 * this also prevents link deletion.
c4307285 2694 *
b97bf3fd
PL
2695 * Returns pointer to link (or 0 if invalid link name).
2696 */
2697
6c00055a 2698static struct link *link_find_link(const char *name, struct tipc_node **node)
b97bf3fd
PL
2699{
2700 struct link_name link_name_parts;
2d627b92 2701 struct tipc_bearer *b_ptr;
c4307285 2702 struct link *l_ptr;
b97bf3fd
PL
2703
2704 if (!link_name_validate(name, &link_name_parts))
1fc54d8f 2705 return NULL;
b97bf3fd 2706
4323add6 2707 b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
b97bf3fd 2708 if (!b_ptr)
1fc54d8f 2709 return NULL;
b97bf3fd 2710
c4307285 2711 *node = tipc_node_find(link_name_parts.addr_peer);
b97bf3fd 2712 if (!*node)
1fc54d8f 2713 return NULL;
b97bf3fd
PL
2714
2715 l_ptr = (*node)->links[b_ptr->identity];
2716 if (!l_ptr || strcmp(l_ptr->name, name))
1fc54d8f 2717 return NULL;
b97bf3fd
PL
2718
2719 return l_ptr;
2720}
2721
c4307285 2722struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
4323add6 2723 u16 cmd)
b97bf3fd
PL
2724{
2725 struct tipc_link_config *args;
c4307285 2726 u32 new_value;
b97bf3fd 2727 struct link *l_ptr;
6c00055a 2728 struct tipc_node *node;
c4307285 2729 int res;
b97bf3fd
PL
2730
2731 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
4323add6 2732 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
b97bf3fd
PL
2733
2734 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2735 new_value = ntohl(args->value);
2736
4323add6 2737 if (!strcmp(args->name, tipc_bclink_name)) {
b97bf3fd 2738 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
4323add6
PL
2739 (tipc_bclink_set_queue_limits(new_value) == 0))
2740 return tipc_cfg_reply_none();
c4307285 2741 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
4323add6 2742 " (cannot change setting on broadcast link)");
b97bf3fd
PL
2743 }
2744
4323add6 2745 read_lock_bh(&tipc_net_lock);
c4307285 2746 l_ptr = link_find_link(args->name, &node);
b97bf3fd 2747 if (!l_ptr) {
4323add6 2748 read_unlock_bh(&tipc_net_lock);
c4307285 2749 return tipc_cfg_reply_error_string("link not found");
b97bf3fd
PL
2750 }
2751
4323add6 2752 tipc_node_lock(node);
b97bf3fd
PL
2753 res = -EINVAL;
2754 switch (cmd) {
c4307285
YH
2755 case TIPC_CMD_SET_LINK_TOL:
2756 if ((new_value >= TIPC_MIN_LINK_TOL) &&
b97bf3fd
PL
2757 (new_value <= TIPC_MAX_LINK_TOL)) {
2758 link_set_supervision_props(l_ptr, new_value);
c4307285 2759 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
4323add6 2760 0, 0, new_value, 0, 0);
0e35fd5e 2761 res = 0;
b97bf3fd
PL
2762 }
2763 break;
c4307285 2764 case TIPC_CMD_SET_LINK_PRI:
16cb4b33
PL
2765 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2766 (new_value <= TIPC_MAX_LINK_PRI)) {
b97bf3fd 2767 l_ptr->priority = new_value;
c4307285 2768 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
4323add6 2769 0, 0, 0, new_value, 0);
0e35fd5e 2770 res = 0;
b97bf3fd
PL
2771 }
2772 break;
c4307285
YH
2773 case TIPC_CMD_SET_LINK_WINDOW:
2774 if ((new_value >= TIPC_MIN_LINK_WIN) &&
b97bf3fd 2775 (new_value <= TIPC_MAX_LINK_WIN)) {
4323add6 2776 tipc_link_set_queue_limits(l_ptr, new_value);
0e35fd5e 2777 res = 0;
b97bf3fd
PL
2778 }
2779 break;
2780 }
4323add6 2781 tipc_node_unlock(node);
b97bf3fd 2782
4323add6 2783 read_unlock_bh(&tipc_net_lock);
b97bf3fd 2784 if (res)
c4307285 2785 return tipc_cfg_reply_error_string("cannot change link setting");
b97bf3fd 2786
4323add6 2787 return tipc_cfg_reply_none();
b97bf3fd
PL
2788}
2789
2790/**
2791 * link_reset_statistics - reset link statistics
2792 * @l_ptr: pointer to link
2793 */
2794
static void link_reset_statistics(struct link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	/* Re-seed baselines so RX/TX packet deltas stay correct after reset */
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}
2801
4323add6 2802struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
b97bf3fd
PL
2803{
2804 char *link_name;
c4307285 2805 struct link *l_ptr;
6c00055a 2806 struct tipc_node *node;
b97bf3fd
PL
2807
2808 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
4323add6 2809 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
b97bf3fd
PL
2810
2811 link_name = (char *)TLV_DATA(req_tlv_area);
4323add6
PL
2812 if (!strcmp(link_name, tipc_bclink_name)) {
2813 if (tipc_bclink_reset_stats())
2814 return tipc_cfg_reply_error_string("link not found");
2815 return tipc_cfg_reply_none();
b97bf3fd
PL
2816 }
2817
4323add6 2818 read_lock_bh(&tipc_net_lock);
c4307285 2819 l_ptr = link_find_link(link_name, &node);
b97bf3fd 2820 if (!l_ptr) {
4323add6
PL
2821 read_unlock_bh(&tipc_net_lock);
2822 return tipc_cfg_reply_error_string("link not found");
b97bf3fd
PL
2823 }
2824
4323add6 2825 tipc_node_lock(node);
b97bf3fd 2826 link_reset_statistics(l_ptr);
4323add6
PL
2827 tipc_node_unlock(node);
2828 read_unlock_bh(&tipc_net_lock);
2829 return tipc_cfg_reply_none();
b97bf3fd
PL
2830}
2831
2832/**
2833 * percent - convert count to a percentage of total (rounding up or down)
2834 */
2835
2836static u32 percent(u32 count, u32 total)
2837{
2838 return (count * 100 + (total / 2)) / total;
2839}
2840
2841/**
4323add6 2842 * tipc_link_stats - print link statistics
b97bf3fd
PL
2843 * @name: link name
2844 * @buf: print buffer area
2845 * @buf_size: size of print buffer area
c4307285 2846 *
b97bf3fd
PL
2847 * Returns length of print buffer data string (or 0 if error)
2848 */
2849
4323add6 2850static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
b97bf3fd
PL
2851{
2852 struct print_buf pb;
c4307285 2853 struct link *l_ptr;
6c00055a 2854 struct tipc_node *node;
b97bf3fd
PL
2855 char *status;
2856 u32 profile_total = 0;
2857
4323add6
PL
2858 if (!strcmp(name, tipc_bclink_name))
2859 return tipc_bclink_stats(buf, buf_size);
b97bf3fd 2860
4323add6 2861 tipc_printbuf_init(&pb, buf, buf_size);
b97bf3fd 2862
4323add6 2863 read_lock_bh(&tipc_net_lock);
c4307285 2864 l_ptr = link_find_link(name, &node);
b97bf3fd 2865 if (!l_ptr) {
4323add6 2866 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
2867 return 0;
2868 }
4323add6 2869 tipc_node_lock(node);
b97bf3fd 2870
4323add6 2871 if (tipc_link_is_active(l_ptr))
b97bf3fd 2872 status = "ACTIVE";
4323add6 2873 else if (tipc_link_is_up(l_ptr))
b97bf3fd
PL
2874 status = "STANDBY";
2875 else
2876 status = "DEFUNCT";
2877 tipc_printf(&pb, "Link <%s>\n"
c4307285
YH
2878 " %s MTU:%u Priority:%u Tolerance:%u ms"
2879 " Window:%u packets\n",
15e979da 2880 l_ptr->name, status, l_ptr->max_pkt,
b97bf3fd 2881 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
c4307285 2882 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
b97bf3fd
PL
2883 l_ptr->next_in_no - l_ptr->stats.recv_info,
2884 l_ptr->stats.recv_fragments,
2885 l_ptr->stats.recv_fragmented,
2886 l_ptr->stats.recv_bundles,
2887 l_ptr->stats.recv_bundled);
c4307285 2888 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
b97bf3fd
PL
2889 l_ptr->next_out_no - l_ptr->stats.sent_info,
2890 l_ptr->stats.sent_fragments,
c4307285 2891 l_ptr->stats.sent_fragmented,
b97bf3fd
PL
2892 l_ptr->stats.sent_bundles,
2893 l_ptr->stats.sent_bundled);
2894 profile_total = l_ptr->stats.msg_length_counts;
2895 if (!profile_total)
2896 profile_total = 1;
2897 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
c4307285 2898 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
0f305bf4 2899 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
b97bf3fd
PL
2900 l_ptr->stats.msg_length_counts,
2901 l_ptr->stats.msg_lengths_total / profile_total,
2902 percent(l_ptr->stats.msg_length_profile[0], profile_total),
2903 percent(l_ptr->stats.msg_length_profile[1], profile_total),
2904 percent(l_ptr->stats.msg_length_profile[2], profile_total),
2905 percent(l_ptr->stats.msg_length_profile[3], profile_total),
2906 percent(l_ptr->stats.msg_length_profile[4], profile_total),
2907 percent(l_ptr->stats.msg_length_profile[5], profile_total),
2908 percent(l_ptr->stats.msg_length_profile[6], profile_total));
c4307285 2909 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
b97bf3fd
PL
2910 l_ptr->stats.recv_states,
2911 l_ptr->stats.recv_probes,
2912 l_ptr->stats.recv_nacks,
c4307285 2913 l_ptr->stats.deferred_recv,
b97bf3fd 2914 l_ptr->stats.duplicates);
c4307285
YH
2915 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2916 l_ptr->stats.sent_states,
2917 l_ptr->stats.sent_probes,
2918 l_ptr->stats.sent_nacks,
2919 l_ptr->stats.sent_acks,
b97bf3fd
PL
2920 l_ptr->stats.retransmitted);
2921 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
2922 l_ptr->stats.bearer_congs,
c4307285 2923 l_ptr->stats.link_congs,
b97bf3fd
PL
2924 l_ptr->stats.max_queue_sz,
2925 l_ptr->stats.queue_sz_counts
2926 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2927 : 0);
2928
4323add6
PL
2929 tipc_node_unlock(node);
2930 read_unlock_bh(&tipc_net_lock);
2931 return tipc_printbuf_validate(&pb);
b97bf3fd
PL
2932}
2933
2934#define MAX_LINK_STATS_INFO 2000
2935
4323add6 2936struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
b97bf3fd
PL
2937{
2938 struct sk_buff *buf;
2939 struct tlv_desc *rep_tlv;
2940 int str_len;
2941
2942 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
4323add6 2943 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
b97bf3fd 2944
4323add6 2945 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
b97bf3fd
PL
2946 if (!buf)
2947 return NULL;
2948
2949 rep_tlv = (struct tlv_desc *)buf->data;
2950
4323add6
PL
2951 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2952 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
b97bf3fd
PL
2953 if (!str_len) {
2954 buf_discard(buf);
c4307285 2955 return tipc_cfg_reply_error_string("link not found");
b97bf3fd
PL
2956 }
2957
2958 skb_put(buf, TLV_SPACE(str_len));
2959 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2960
2961 return buf;
2962}
2963
b97bf3fd 2964/**
4323add6 2965 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
b97bf3fd
PL
2966 * @dest: network address of destination node
2967 * @selector: used to select from set of active links
c4307285 2968 *
b97bf3fd
PL
2969 * If no active link can be found, uses default maximum packet size.
2970 */
2971
4323add6 2972u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
b97bf3fd 2973{
6c00055a 2974 struct tipc_node *n_ptr;
b97bf3fd
PL
2975 struct link *l_ptr;
2976 u32 res = MAX_PKT_DEFAULT;
c4307285 2977
b97bf3fd
PL
2978 if (dest == tipc_own_addr)
2979 return MAX_MSG_SIZE;
2980
c4307285 2981 read_lock_bh(&tipc_net_lock);
51a8e4de 2982 n_ptr = tipc_node_find(dest);
b97bf3fd 2983 if (n_ptr) {
4323add6 2984 tipc_node_lock(n_ptr);
b97bf3fd
PL
2985 l_ptr = n_ptr->active_links[selector & 1];
2986 if (l_ptr)
15e979da 2987 res = l_ptr->max_pkt;
4323add6 2988 tipc_node_unlock(n_ptr);
b97bf3fd 2989 }
c4307285 2990 read_unlock_bh(&tipc_net_lock);
b97bf3fd
PL
2991 return res;
2992}
2993
8d64a5ba 2994static void link_print(struct link *l_ptr, const char *str)
b97bf3fd 2995{
8d64a5ba
AS
2996 char print_area[256];
2997 struct print_buf pb;
2998 struct print_buf *buf = &pb;
2999
3000 tipc_printbuf_init(buf, print_area, sizeof(print_area));
3001
b97bf3fd 3002 tipc_printf(buf, str);
b97bf3fd 3003 tipc_printf(buf, "Link %x<%s>:",
2d627b92 3004 l_ptr->addr, l_ptr->b_ptr->name);
8d64a5ba
AS
3005
3006#ifdef CONFIG_TIPC_DEBUG
3007 if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3008 goto print_state;
3009
b97bf3fd
PL
3010 tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3011 tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3012 tipc_printf(buf, "SQUE");
3013 if (l_ptr->first_out) {
3014 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3015 if (l_ptr->next_out)
3016 tipc_printf(buf, "%u..",
3017 msg_seqno(buf_msg(l_ptr->next_out)));
b82834e6 3018 tipc_printf(buf, "%u]", msg_seqno(buf_msg(l_ptr->last_out)));
c4307285
YH
3019 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3020 msg_seqno(buf_msg(l_ptr->first_out)))
f64f9e71
JP
3021 != (l_ptr->out_queue_size - 1)) ||
3022 (l_ptr->last_out->next != NULL)) {
b97bf3fd 3023 tipc_printf(buf, "\nSend queue inconsistency\n");
c8a61b52
AS
3024 tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3025 tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3026 tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
b97bf3fd
PL
3027 }
3028 } else
3029 tipc_printf(buf, "[]");
3030 tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3031 if (l_ptr->oldest_deferred_in) {
3032 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3033 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3034 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3035 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3036 tipc_printf(buf, ":RQSIZ(%u)",
3037 l_ptr->deferred_inqueue_sz);
3038 }
3039 }
8d64a5ba
AS
3040print_state:
3041#endif
3042
b97bf3fd
PL
3043 if (link_working_unknown(l_ptr))
3044 tipc_printf(buf, ":WU");
8d64a5ba 3045 else if (link_reset_reset(l_ptr))
b97bf3fd 3046 tipc_printf(buf, ":RR");
8d64a5ba 3047 else if (link_reset_unknown(l_ptr))
b97bf3fd 3048 tipc_printf(buf, ":RU");
8d64a5ba 3049 else if (link_working_working(l_ptr))
b97bf3fd
PL
3050 tipc_printf(buf, ":WW");
3051 tipc_printf(buf, "\n");
8d64a5ba
AS
3052
3053 tipc_printbuf_validate(buf);
3054 info("%s", print_area);
b97bf3fd
PL
3055}
3056
This page took 0.729212 seconds and 5 git commands to generate.