sctp: Make the association hashtable handle multiple network namespaces
net/sctp/associola.c
1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001 Intel Corp.
6 * Copyright (c) 2001 La Monte H.P. Yarroll
7 *
8 * This file is part of the SCTP kernel implementation
9 *
10 * This module provides the abstraction for an SCTP association.
11 *
12 * This SCTP implementation is free software;
13 * you can redistribute it and/or modify it under the terms of
14 * the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This SCTP implementation is distributed in the hope that it
19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
20 * ************************
21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
22 * See the GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with GNU CC; see the file COPYING. If not, write to
26 * the Free Software Foundation, 59 Temple Place - Suite 330,
27 * Boston, MA 02111-1307, USA.
28 *
29 * Please send any bug reports or fixes you make to the
30 * email address(es):
31 * lksctp developers <lksctp-developers@lists.sourceforge.net>
32 *
33 * Or submit a bug report through the following website:
34 * http://www.sf.net/projects/lksctp
35 *
36 * Written or modified by:
37 * La Monte H.P. Yarroll <piggy@acm.org>
38 * Karl Knutson <karl@athena.chicago.il.us>
39 * Jon Grimm <jgrimm@us.ibm.com>
40 * Xingang Guo <xingang.guo@intel.com>
41 * Hui Huang <hui.huang@nokia.com>
42 * Sridhar Samudrala <sri@us.ibm.com>
43 * Daisy Chang <daisyc@us.ibm.com>
44 * Ryan Layer <rmlayer@us.ibm.com>
45 * Kevin Gao <kevin.gao@intel.com>
46 *
47 * Any bugs reported given to us we will try to fix... any fixes shared will
48 * be incorporated into the next SCTP release.
49 */
50
51 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52
53 #include <linux/types.h>
54 #include <linux/fcntl.h>
55 #include <linux/poll.h>
56 #include <linux/init.h>
57
58 #include <linux/slab.h>
59 #include <linux/in.h>
60 #include <net/ipv6.h>
61 #include <net/sctp/sctp.h>
62 #include <net/sctp/sm.h>
63
64 /* Forward declarations for internal functions. */
65 static void sctp_assoc_bh_rcv(struct work_struct *work);
66 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
67 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
68
69 /* Keep track of the new idr low so that we don't re-use association id
70 * numbers too fast. It is protected by the idr spin lock and is in the
71 * range of 1 - INT_MAX.
72 */
73 static u32 idr_low = 1;
74
75
76 /* 1st Level Abstractions. */
77
78 /* Initialize a new association from provided memory. */
79 static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
80 const struct sctp_endpoint *ep,
81 const struct sock *sk,
82 sctp_scope_t scope,
83 gfp_t gfp)
84 {
85 struct sctp_sock *sp;
86 int i;
87 sctp_paramhdr_t *p;
88 int err;
89
90 /* Retrieve the SCTP per socket area. */
91 sp = sctp_sk((struct sock *)sk);
92
93 /* Discarding const is appropriate here. */
94 asoc->ep = (struct sctp_endpoint *)ep;
95 sctp_endpoint_hold(asoc->ep);
96
97 /* Hold the sock. */
98 asoc->base.sk = (struct sock *)sk;
99 sock_hold(asoc->base.sk);
100
101 /* Initialize the common base substructure. */
102 asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
103
104 /* Initialize the object handling fields. */
105 atomic_set(&asoc->base.refcnt, 1);
106 asoc->base.dead = 0;
107 asoc->base.malloced = 0;
108
109 /* Initialize the bind addr area. */
110 sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
111
112 asoc->state = SCTP_STATE_CLOSED;
113
114 /* Set these values from the socket values; a conversion from
115 * milliseconds to seconds/microseconds must also be done.
116 */
117 asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
118 asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
119 * 1000;
120 asoc->frag_point = 0;
121 asoc->user_frag = sp->user_frag;
122
123 /* Set the association max_retrans and RTO values from the
124 * socket values.
125 */
126 asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
127 asoc->pf_retrans = sctp_pf_retrans;
128
129 asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
130 asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
131 asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
132
133 asoc->overall_error_count = 0;
134
135 /* Initialize the association's heartbeat interval based on the
136 * sock configured value.
137 */
138 asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
139
140 /* Initialize path max retrans value. */
141 asoc->pathmaxrxt = sp->pathmaxrxt;
142
143 /* Initialize default path MTU. */
144 asoc->pathmtu = sp->pathmtu;
145
146 /* Set association default SACK delay */
147 asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
148 asoc->sackfreq = sp->sackfreq;
149
150 /* Set the association default flags controlling
151 * Heartbeat, SACK delay, and Path MTU Discovery.
152 */
153 asoc->param_flags = sp->param_flags;
154
155 /* Initialize the maximum number of new data packets that can be sent
156 * in a burst.
157 */
158 asoc->max_burst = sp->max_burst;
159
160 /* initialize association timers */
161 asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
162 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
163 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
164 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
165 asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
166 asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
167
168 /* sctpimpguide Section 2.12.2
169 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
170 * recommended value of 5 times 'RTO.Max'.
171 */
172 asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
173 = 5 * asoc->rto_max;
174
175 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
176 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
177 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
178 min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
179
180 /* Initializes the timers */
181 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
182 setup_timer(&asoc->timers[i], sctp_timer_events[i],
183 (unsigned long)asoc);
184
185 /* Pull default initialization values from the sock options.
186 * Note: This assumes that the values have already been
187 * validated in the sock.
188 */
189 asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
190 asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
191 asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
192
193 asoc->max_init_timeo =
194 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
195
196 /* Allocate storage for the ssnmap after the inbound and outbound
197 * streams have been negotiated during Init.
198 */
199 asoc->ssnmap = NULL;
200
201 /* Set the local window size for receive.
202 * This is also the rcvbuf space per association.
203 * RFC 4960, Section 6.1 - A SCTP receiver MUST be able to receive
204 * a minimum of 1500 bytes in one SCTP packet.
205 */
206 if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
207 asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
208 else
209 asoc->rwnd = sk->sk_rcvbuf/2;
210
211 asoc->a_rwnd = asoc->rwnd;
212
213 asoc->rwnd_over = 0;
214 asoc->rwnd_press = 0;
215
216 /* Use my own max window until I learn something better. */
217 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
218
219 /* Set the sndbuf size for transmit. */
220 asoc->sndbuf_used = 0;
221
222 /* Initialize the receive memory counter */
223 atomic_set(&asoc->rmem_alloc, 0);
224
225 init_waitqueue_head(&asoc->wait);
226
227 asoc->c.my_vtag = sctp_generate_tag(ep);
228 asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. */
229 asoc->c.peer_vtag = 0;
230 asoc->c.my_ttag = 0;
231 asoc->c.peer_ttag = 0;
232 asoc->c.my_port = ep->base.bind_addr.port;
233
234 asoc->c.initial_tsn = sctp_generate_tsn(ep);
235
236 asoc->next_tsn = asoc->c.initial_tsn;
237
238 asoc->ctsn_ack_point = asoc->next_tsn - 1;
239 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
240 asoc->highest_sacked = asoc->ctsn_ack_point;
241 asoc->last_cwr_tsn = asoc->ctsn_ack_point;
242 asoc->unack_data = 0;
243
244 /* ADDIP Section 4.1 Asconf Chunk Procedures
245 *
246 * When an endpoint has an ASCONF signaled change to be sent to the
247 * remote endpoint it should do the following:
248 * ...
249 * A2) a serial number should be assigned to the chunk. The serial
250 * number SHOULD be a monotonically increasing number. The serial
251 * numbers SHOULD be initialized at the start of the
252 * association to the same value as the initial TSN.
253 */
254 asoc->addip_serial = asoc->c.initial_tsn;
255
256 INIT_LIST_HEAD(&asoc->addip_chunk_list);
257 INIT_LIST_HEAD(&asoc->asconf_ack_list);
258
259 /* Make an empty list of remote transport addresses. */
260 INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
261 asoc->peer.transport_count = 0;
262
263 /* RFC 2960 5.1 Normal Establishment of an Association
264 *
265 * After the reception of the first data chunk in an
266 * association the endpoint must immediately respond with a
267 * sack to acknowledge the data chunk. Subsequent
268 * acknowledgements should be done as described in Section
269 * 6.2.
270 *
271 * [We implement this by telling a new association that it
272 * already received one packet.]
273 */
274 asoc->peer.sack_needed = 1;
275 asoc->peer.sack_cnt = 0;
276 asoc->peer.sack_generation = 1;
277
278 /* Assume that the peer will tell us if he recognizes ASCONF
279 * as part of INIT exchange.
280 * The sctp_addip_noauth option is there for backward compatibility
281 * and reverts to the old behavior.
282 */
283 asoc->peer.asconf_capable = 0;
284 if (sctp_addip_noauth)
285 asoc->peer.asconf_capable = 1;
286 asoc->asconf_addr_del_pending = NULL;
287 asoc->src_out_of_asoc_ok = 0;
288 asoc->new_transport = NULL;
289
290 /* Create an input queue. */
291 sctp_inq_init(&asoc->base.inqueue);
292 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
293
294 /* Create an output queue. */
295 sctp_outq_init(asoc, &asoc->outqueue);
296
297 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
298 goto fail_init;
299
300 memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
301
302 asoc->need_ecne = 0;
303
304 asoc->assoc_id = 0;
305
306 /* Assume that peer would support both address types unless we are
307 * told otherwise.
308 */
309 asoc->peer.ipv4_address = 1;
310 if (asoc->base.sk->sk_family == PF_INET6)
311 asoc->peer.ipv6_address = 1;
312 INIT_LIST_HEAD(&asoc->asocs);
313
314 asoc->autoclose = sp->autoclose;
315
316 asoc->default_stream = sp->default_stream;
317 asoc->default_ppid = sp->default_ppid;
318 asoc->default_flags = sp->default_flags;
319 asoc->default_context = sp->default_context;
320 asoc->default_timetolive = sp->default_timetolive;
321 asoc->default_rcv_context = sp->default_rcv_context;
322
323 /* AUTH related initializations */
324 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
325 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
326 if (err)
327 goto fail_init;
328
329 asoc->active_key_id = ep->active_key_id;
330 asoc->asoc_shared_key = NULL;
331
332 asoc->default_hmac_id = 0;
333 /* Save the hmacs and chunks list into this association */
334 if (ep->auth_hmacs_list)
335 memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
336 ntohs(ep->auth_hmacs_list->param_hdr.length));
337 if (ep->auth_chunk_list)
338 memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
339 ntohs(ep->auth_chunk_list->param_hdr.length));
340
341 /* Get the AUTH random number for this association */
342 p = (sctp_paramhdr_t *)asoc->c.auth_random;
343 p->type = SCTP_PARAM_RANDOM;
344 p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
345 get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
346
347 return asoc;
348
349 fail_init:
350 sctp_endpoint_put(asoc->ep);
351 sock_put(asoc->base.sk);
352 return NULL;
353 }
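/*
 * Editor's illustrative sketch (not part of this file): the two numeric
 * derivations done during init above -- splitting a millisecond cookie
 * lifetime into a seconds/microseconds pair, and setting the T5
 * shutdown-guard timeout to 5 * RTO.Max per sctpimpguide 2.12.2. The
 * sample values are assumptions, not kernel defaults.
 */
#include <stdio.h>

int main(void)
{
	unsigned long cookie_life_ms = 60000;	/* assumed sasoc_cookie_life */
	unsigned long rto_max_ms = 60000;	/* assumed srto_max */

	unsigned long tv_sec = cookie_life_ms / 1000;
	unsigned long tv_usec = (cookie_life_ms % 1000) * 1000;
	unsigned long t5_guard_ms = 5 * rto_max_ms;

	printf("cookie life: %lu s + %lu us, T5 guard: %lu ms\n",
	       tv_sec, tv_usec, t5_guard_ms);
	return 0;
}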
354
355 /* Allocate and initialize a new association */
356 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
357 const struct sock *sk,
358 sctp_scope_t scope,
359 gfp_t gfp)
360 {
361 struct sctp_association *asoc;
362
363 asoc = t_new(struct sctp_association, gfp);
364 if (!asoc)
365 goto fail;
366
367 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
368 goto fail_init;
369
370 asoc->base.malloced = 1;
371 SCTP_DBG_OBJCNT_INC(assoc);
372 SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);
373
374 return asoc;
375
376 fail_init:
377 kfree(asoc);
378 fail:
379 return NULL;
380 }
381
382 /* Free this association if possible. There may still be users, so
383 * the actual deallocation may be delayed.
384 */
385 void sctp_association_free(struct sctp_association *asoc)
386 {
387 struct sock *sk = asoc->base.sk;
388 struct sctp_transport *transport;
389 struct list_head *pos, *temp;
390 int i;
391
392 /* Only real associations count against the endpoint, so
393 * don't bother if this is a temporary association.
394 */
395 if (!asoc->temp) {
396 list_del(&asoc->asocs);
397
398 /* Decrement the backlog value for a TCP-style listening
399 * socket.
400 */
401 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
402 sk->sk_ack_backlog--;
403 }
404
405 /* Mark as dead, so other users can know this structure is
406 * going away.
407 */
408 asoc->base.dead = 1;
409
410 /* Dispose of any data lying around in the outqueue. */
411 sctp_outq_free(&asoc->outqueue);
412
413 /* Dispose of any pending messages for the upper layer. */
414 sctp_ulpq_free(&asoc->ulpq);
415
416 /* Dispose of any pending chunks on the inqueue. */
417 sctp_inq_free(&asoc->base.inqueue);
418
419 sctp_tsnmap_free(&asoc->peer.tsn_map);
420
421 /* Free ssnmap storage. */
422 sctp_ssnmap_free(asoc->ssnmap);
423
424 /* Clean up the bound address list. */
425 sctp_bind_addr_free(&asoc->base.bind_addr);
426
427 /* Do we need to go through all of our timers and
428 * delete them? To be safe we will try to delete all, but we
429 * should be able to go through and make a guess based
430 * on our state.
431 */
432 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
433 if (timer_pending(&asoc->timers[i]) &&
434 del_timer(&asoc->timers[i]))
435 sctp_association_put(asoc);
436 }
437
438 /* Free peer's cached cookie. */
439 kfree(asoc->peer.cookie);
440 kfree(asoc->peer.peer_random);
441 kfree(asoc->peer.peer_chunks);
442 kfree(asoc->peer.peer_hmacs);
443
444 /* Release the transport structures. */
445 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
446 transport = list_entry(pos, struct sctp_transport, transports);
447 list_del(pos);
448 sctp_transport_free(transport);
449 }
450
451 asoc->peer.transport_count = 0;
452
453 sctp_asconf_queue_teardown(asoc);
454
455 /* Free pending address space being deleted;
456 * kfree(NULL) is a no-op, so no NULL check is needed. */
457 kfree(asoc->asconf_addr_del_pending);
458
459 /* AUTH - Free the endpoint shared keys */
460 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
461
462 /* AUTH - Free the association shared key */
463 sctp_auth_key_put(asoc->asoc_shared_key);
464
465 sctp_association_put(asoc);
466 }
467
468 /* Cleanup and free up an association. */
469 static void sctp_association_destroy(struct sctp_association *asoc)
470 {
471 SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
472
473 sctp_endpoint_put(asoc->ep);
474 sock_put(asoc->base.sk);
475
476 if (asoc->assoc_id != 0) {
477 spin_lock_bh(&sctp_assocs_id_lock);
478 idr_remove(&sctp_assocs_id, asoc->assoc_id);
479 spin_unlock_bh(&sctp_assocs_id_lock);
480 }
481
482 WARN_ON(atomic_read(&asoc->rmem_alloc));
483
484 if (asoc->base.malloced) {
485 kfree(asoc);
486 SCTP_DBG_OBJCNT_DEC(assoc);
487 }
488 }
489
490 /* Change the primary destination address for the peer. */
491 void sctp_assoc_set_primary(struct sctp_association *asoc,
492 struct sctp_transport *transport)
493 {
494 int changeover = 0;
495
496 /* it's a changeover only if we already have a primary path
497 * that we are changing
498 */
499 if (asoc->peer.primary_path != NULL &&
500 asoc->peer.primary_path != transport)
501 changeover = 1;
502
503 asoc->peer.primary_path = transport;
504
505 /* Set a default msg_name for events. */
506 memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
507 sizeof(union sctp_addr));
508
509 /* If the primary path is changing, assume that the
510 * user wants to use this new path.
511 */
512 if ((transport->state == SCTP_ACTIVE) ||
513 (transport->state == SCTP_UNKNOWN))
514 asoc->peer.active_path = transport;
515
516 /*
517 * SFR-CACC algorithm:
518 * Upon the receipt of a request to change the primary
519 * destination address, on the data structure for the new
520 * primary destination, the sender MUST do the following:
521 *
522 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
523 * to this destination address earlier. The sender MUST set
524 * CYCLING_CHANGEOVER to indicate that this switch is a
525 * double switch to the same destination address.
526 *
527 * Really, only bother if we have data queued or outstanding on
528 * the association.
529 */
530 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
531 return;
532
533 if (transport->cacc.changeover_active)
534 transport->cacc.cycling_changeover = changeover;
535
536 /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
537 * a changeover has occurred.
538 */
539 transport->cacc.changeover_active = changeover;
540
541 /* 3) The sender MUST store the next TSN to be sent in
542 * next_tsn_at_change.
543 */
544 transport->cacc.next_tsn_at_change = asoc->next_tsn;
545 }
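/*
 * Editor's illustrative sketch (not part of this file): the SFR-CACC
 * bookkeeping performed by sctp_assoc_set_primary(), reduced to plain C.
 * The struct mirrors the kernel's cacc block in spirit; the names here
 * are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct cacc_state {
	bool changeover_active;
	bool cycling_changeover;
	unsigned int next_tsn_at_change;
};

static void cacc_on_set_primary(struct cacc_state *cacc, bool changeover,
				unsigned int next_tsn)
{
	/* 1) A switch while a changeover is already active means we are
	 * cycling back to an address used earlier.
	 */
	if (cacc->changeover_active)
		cacc->cycling_changeover = changeover;
	/* 2) Record that a changeover has occurred. */
	cacc->changeover_active = changeover;
	/* 3) Store the next TSN to be sent at the time of the change. */
	cacc->next_tsn_at_change = next_tsn;
}

int main(void)
{
	struct cacc_state cacc = { false, false, 0 };

	cacc_on_set_primary(&cacc, true, 1000);	/* first switch */
	cacc_on_set_primary(&cacc, true, 1050);	/* switch again: cycling */
	printf("cycling=%d active=%d tsn=%u\n", cacc.cycling_changeover,
	       cacc.changeover_active, cacc.next_tsn_at_change);
	return 0;
}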
546
547 /* Remove a transport from an association. */
548 void sctp_assoc_rm_peer(struct sctp_association *asoc,
549 struct sctp_transport *peer)
550 {
551 struct list_head *pos;
552 struct sctp_transport *transport;
553
554 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
555 " port: %d\n",
556 asoc,
557 (&peer->ipaddr),
558 ntohs(peer->ipaddr.v4.sin_port));
559
560 /* If we are to remove the current retran_path, update it
561 * to the next peer before removing this peer from the list.
562 */
563 if (asoc->peer.retran_path == peer)
564 sctp_assoc_update_retran_path(asoc);
565
566 /* Remove this peer from the list. */
567 list_del(&peer->transports);
568
569 /* Get the first transport of asoc. */
570 pos = asoc->peer.transport_addr_list.next;
571 transport = list_entry(pos, struct sctp_transport, transports);
572
573 /* Update any entries that match the peer to be deleted. */
574 if (asoc->peer.primary_path == peer)
575 sctp_assoc_set_primary(asoc, transport);
576 if (asoc->peer.active_path == peer)
577 asoc->peer.active_path = transport;
578 if (asoc->peer.retran_path == peer)
579 asoc->peer.retran_path = transport;
580 if (asoc->peer.last_data_from == peer)
581 asoc->peer.last_data_from = transport;
582
583 /* If we remove the transport an INIT was last sent to, set it to
584 * NULL. Combined with the update of the retran path above, this
585 * will cause the next INIT to be sent to the next available
586 * transport, maintaining the cycle.
587 */
588 if (asoc->init_last_sent_to == peer)
589 asoc->init_last_sent_to = NULL;
590
591 /* If we remove the transport an SHUTDOWN was last sent to, set it
592 * to NULL. Combined with the update of the retran path above, this
593 * will cause the next SHUTDOWN to be sent to the next available
594 * transport, maintaining the cycle.
595 */
596 if (asoc->shutdown_last_sent_to == peer)
597 asoc->shutdown_last_sent_to = NULL;
598
599 /* If we remove the transport an ASCONF was last sent to, set it to
600 * NULL.
601 */
602 if (asoc->addip_last_asconf &&
603 asoc->addip_last_asconf->transport == peer)
604 asoc->addip_last_asconf->transport = NULL;
605
606 /* If we have something on the transmitted list, we have to
607 * save it off. The best place is the active path.
608 */
609 if (!list_empty(&peer->transmitted)) {
610 struct sctp_transport *active = asoc->peer.active_path;
611 struct sctp_chunk *ch;
612
613 /* Reset the transport of each chunk on this list */
614 list_for_each_entry(ch, &peer->transmitted,
615 transmitted_list) {
616 ch->transport = NULL;
617 ch->rtt_in_progress = 0;
618 }
619
620 list_splice_tail_init(&peer->transmitted,
621 &active->transmitted);
622
623 /* Start a T3 timer here in case it wasn't running so
624 * that these migrated packets have a chance to get
625 * retransmitted.
626 */
627 if (!timer_pending(&active->T3_rtx_timer))
628 if (!mod_timer(&active->T3_rtx_timer,
629 jiffies + active->rto))
630 sctp_transport_hold(active);
631 }
632
633 asoc->peer.transport_count--;
634
635 sctp_transport_free(peer);
636 }
637
638 /* Add a transport address to an association. */
639 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
640 const union sctp_addr *addr,
641 const gfp_t gfp,
642 const int peer_state)
643 {
644 struct sctp_transport *peer;
645 struct sctp_sock *sp;
646 unsigned short port;
647
648 sp = sctp_sk(asoc->base.sk);
649
650 /* AF_INET and AF_INET6 share common port field. */
651 port = ntohs(addr->v4.sin_port);
652
653 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
654 " port: %d state:%d\n",
655 asoc,
656 addr,
657 port,
658 peer_state);
659
660 /* Set the port if it has not been set yet. */
661 if (0 == asoc->peer.port)
662 asoc->peer.port = port;
663
664 /* Check to see if this is a duplicate. */
665 peer = sctp_assoc_lookup_paddr(asoc, addr);
666 if (peer) {
667 /* An UNKNOWN state is only set on transports added by
668 * user in sctp_connectx() call. Such transports should be
669 * considered CONFIRMED per RFC 4960, Section 5.4.
670 */
671 if (peer->state == SCTP_UNKNOWN) {
672 peer->state = SCTP_ACTIVE;
673 }
674 return peer;
675 }
676
677 peer = sctp_transport_new(addr, gfp);
678 if (!peer)
679 return NULL;
680
681 sctp_transport_set_owner(peer, asoc);
682
683 /* Initialize the peer's heartbeat interval based on the
684 * association configured value.
685 */
686 peer->hbinterval = asoc->hbinterval;
687
688 /* Set the path max_retrans. */
689 peer->pathmaxrxt = asoc->pathmaxrxt;
690
691 /* And the partial failure retrans threshold */
692 peer->pf_retrans = asoc->pf_retrans;
693
694 /* Initialize the peer's SACK delay timeout based on the
695 * association configured value.
696 */
697 peer->sackdelay = asoc->sackdelay;
698 peer->sackfreq = asoc->sackfreq;
699
700 /* Enable/disable heartbeat, SACK delay, and path MTU discovery
701 * based on association setting.
702 */
703 peer->param_flags = asoc->param_flags;
704
705 sctp_transport_route(peer, NULL, sp);
706
707 /* Initialize the pmtu of the transport. */
708 if (peer->param_flags & SPP_PMTUD_DISABLE) {
709 if (asoc->pathmtu)
710 peer->pathmtu = asoc->pathmtu;
711 else
712 peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
713 }
714
715 /* If this is the first transport addr on this association,
716 * initialize the association PMTU to the peer's PMTU.
717 * If not and the current association PMTU is higher than the new
718 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
719 */
720 if (asoc->pathmtu)
721 asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
722 else
723 asoc->pathmtu = peer->pathmtu;
724
725 SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
726 "%d\n", asoc, asoc->pathmtu);
727 peer->pmtu_pending = 0;
728
729 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
730
731 /* The asoc->peer.port might not be meaningful yet, but
732 * initialize the packet structure anyway.
733 */
734 sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
735 asoc->peer.port);
736
737 /* 7.2.1 Slow-Start
738 *
739 * o The initial cwnd before DATA transmission or after a sufficiently
740 * long idle period MUST be set to
741 * min(4*MTU, max(2*MTU, 4380 bytes))
742 *
743 * o The initial value of ssthresh MAY be arbitrarily high
744 * (for example, implementations MAY use the size of the
745 * receiver advertised window).
746 */
747 peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
748
749 /* At this point, we may not have the receiver's advertised window,
750 * so initialize ssthresh to the default value and it will be set
751 * later when we process the INIT.
752 */
753 peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
754
755 peer->partial_bytes_acked = 0;
756 peer->flight_size = 0;
757 peer->burst_limited = 0;
758
759 /* Set the transport's RTO.initial value */
760 peer->rto = asoc->rto_initial;
761
762 /* Set the peer's active state. */
763 peer->state = peer_state;
764
765 /* Attach the remote transport to our asoc. */
766 list_add_tail(&peer->transports, &asoc->peer.transport_addr_list);
767 asoc->peer.transport_count++;
768
769 /* If we do not yet have a primary path, set one. */
770 if (!asoc->peer.primary_path) {
771 sctp_assoc_set_primary(asoc, peer);
772 asoc->peer.retran_path = peer;
773 }
774
775 if (asoc->peer.active_path == asoc->peer.retran_path &&
776 peer->state != SCTP_UNCONFIRMED) {
777 asoc->peer.retran_path = peer;
778 }
779
780 return peer;
781 }
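/*
 * Editor's illustrative sketch (not part of this file): the RFC 4960
 * 7.2.1 initial cwnd rule applied above to each new peer,
 * min(4*MTU, max(2*MTU, 4380 bytes)), as a standalone helper.
 */
#include <stdio.h>

static unsigned int initial_cwnd(unsigned int pmtu)
{
	unsigned int floor = 2 * pmtu > 4380 ? 2 * pmtu : 4380;
	unsigned int ceil = 4 * pmtu;

	return ceil < floor ? ceil : floor;
}

int main(void)
{
	printf("pmtu 1500 -> cwnd %u\n", initial_cwnd(1500));	/* 4380 */
	printf("pmtu 9000 -> cwnd %u\n", initial_cwnd(9000));	/* 18000 */
	return 0;
}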
782
783 /* Delete a transport address from an association. */
784 void sctp_assoc_del_peer(struct sctp_association *asoc,
785 const union sctp_addr *addr)
786 {
787 struct list_head *pos;
788 struct list_head *temp;
789 struct sctp_transport *transport;
790
791 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
792 transport = list_entry(pos, struct sctp_transport, transports);
793 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
794 /* Do book keeping for removing the peer and free it. */
795 sctp_assoc_rm_peer(asoc, transport);
796 break;
797 }
798 }
799 }
800
801 /* Lookup a transport by address. */
802 struct sctp_transport *sctp_assoc_lookup_paddr(
803 const struct sctp_association *asoc,
804 const union sctp_addr *address)
805 {
806 struct sctp_transport *t;
807
808 /* Cycle through all transports searching for a peer address. */
809
810 list_for_each_entry(t, &asoc->peer.transport_addr_list,
811 transports) {
812 if (sctp_cmp_addr_exact(address, &t->ipaddr))
813 return t;
814 }
815
816 return NULL;
817 }
818
819 /* Remove all transports except a given one */
820 void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
821 struct sctp_transport *primary)
822 {
823 struct sctp_transport *temp;
824 struct sctp_transport *t;
825
826 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
827 transports) {
828 /* if the current transport is not the primary one, delete it */
829 if (t != primary)
830 sctp_assoc_rm_peer(asoc, t);
831 }
832 }
833
834 /* Engage in transport control operations.
835 * Mark the transport up or down and send a notification to the user.
836 * Select and update the new active and retran paths.
837 */
838 void sctp_assoc_control_transport(struct sctp_association *asoc,
839 struct sctp_transport *transport,
840 sctp_transport_cmd_t command,
841 sctp_sn_error_t error)
842 {
843 struct sctp_transport *t = NULL;
844 struct sctp_transport *first;
845 struct sctp_transport *second;
846 struct sctp_ulpevent *event;
847 struct sockaddr_storage addr;
848 int spc_state = 0;
849 bool ulp_notify = true;
850
851 /* Record the transition on the transport. */
852 switch (command) {
853 case SCTP_TRANSPORT_UP:
854 /* If we are moving from UNCONFIRMED state due
855 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
856 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
857 */
858 if (SCTP_UNCONFIRMED == transport->state &&
859 SCTP_HEARTBEAT_SUCCESS == error)
860 spc_state = SCTP_ADDR_CONFIRMED;
861 else
862 spc_state = SCTP_ADDR_AVAILABLE;
863 /* Don't inform ULP about transition from PF to
864 * active state and set cwnd to 1, see SCTP
865 * Quick failover draft section 5.1, point 5
866 */
867 if (transport->state == SCTP_PF) {
868 ulp_notify = false;
869 transport->cwnd = 1;
870 }
871 transport->state = SCTP_ACTIVE;
872 break;
873
874 case SCTP_TRANSPORT_DOWN:
875 /* If the transport was never confirmed, do not transition it
876 * to inactive state. Also, release the cached route since
877 * there may be a better route next time.
878 */
879 if (transport->state != SCTP_UNCONFIRMED)
880 transport->state = SCTP_INACTIVE;
881 else {
882 dst_release(transport->dst);
883 transport->dst = NULL;
884 }
885
886 spc_state = SCTP_ADDR_UNREACHABLE;
887 break;
888
889 case SCTP_TRANSPORT_PF:
890 transport->state = SCTP_PF;
891 ulp_notify = false;
892 break;
893
894 default:
895 return;
896 }
897
898 /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
899 * user.
900 */
901 if (ulp_notify) {
902 memset(&addr, 0, sizeof(struct sockaddr_storage));
903 memcpy(&addr, &transport->ipaddr,
904 transport->af_specific->sockaddr_len);
905 event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
906 0, spc_state, error, GFP_ATOMIC);
907 if (event)
908 sctp_ulpq_tail_event(&asoc->ulpq, event);
909 }
910
911 /* Select new active and retran paths. */
912
913 /* Look for the two most recently used active transports.
914 *
915 * This code produces the wrong ordering whenever jiffies
916 * rolls over, but we still get usable transports, so we don't
917 * worry about it.
918 */
919 first = NULL; second = NULL;
920
921 list_for_each_entry(t, &asoc->peer.transport_addr_list,
922 transports) {
923
924 if ((t->state == SCTP_INACTIVE) ||
925 (t->state == SCTP_UNCONFIRMED) ||
926 (t->state == SCTP_PF))
927 continue;
928 if (!first || t->last_time_heard > first->last_time_heard) {
929 second = first;
930 first = t;
931 } else if (!second ||
932 t->last_time_heard > second->last_time_heard)
933 second = t;
934 }
935
936 /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
937 *
938 * By default, an endpoint should always transmit to the
939 * primary path, unless the SCTP user explicitly specifies the
940 * destination transport address (and possibly source
941 * transport address) to use.
942 *
943 * [If the primary is active but not most recent, bump the most
944 * recently used transport.]
945 */
946 if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
947 (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
948 first != asoc->peer.primary_path) {
949 second = first;
950 first = asoc->peer.primary_path;
951 }
952
953 /* If we failed to find a usable transport, just camp on the
954 * primary, even if it is inactive; if we found only one, let
955 * the retran path fall back to it as well. */
956 if (!first)
957 first = asoc->peer.primary_path;
958 if (!second)
959 second = first;
960
961 /* Set the active and retran transports. */
962 asoc->peer.active_path = first;
963 asoc->peer.retran_path = second;
964 }
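/*
 * Editor's illustrative sketch (not part of this file): the "two most
 * recently heard" scan in sctp_assoc_control_transport(), on a plain
 * array and with the comparisons chained so first and second end up as
 * two distinct best candidates.
 */
#include <stdio.h>

enum tstate { T_ACTIVE, T_INACTIVE, T_PF };

struct transport {
	enum tstate state;
	unsigned long last_time_heard;
};

int main(void)
{
	struct transport t[] = {
		{ T_ACTIVE, 100 }, { T_PF, 500 },
		{ T_ACTIVE, 300 }, { T_ACTIVE, 200 },
	};
	struct transport *first = NULL, *second = NULL;

	for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
		if (t[i].state != T_ACTIVE)
			continue;	/* skip unusable transports */
		if (!first || t[i].last_time_heard > first->last_time_heard) {
			second = first;
			first = &t[i];
		} else if (!second ||
			   t[i].last_time_heard > second->last_time_heard) {
			second = &t[i];
		}
	}
	printf("first=%lu second=%lu\n", first->last_time_heard,
	       second->last_time_heard);	/* 300 and 200 */
	return 0;
}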
965
966 /* Hold a reference to an association. */
967 void sctp_association_hold(struct sctp_association *asoc)
968 {
969 atomic_inc(&asoc->base.refcnt);
970 }
971
972 /* Release a reference to an association and cleanup
973 * if there are no more references.
974 */
975 void sctp_association_put(struct sctp_association *asoc)
976 {
977 if (atomic_dec_and_test(&asoc->base.refcnt))
978 sctp_association_destroy(asoc);
979 }
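/*
 * Editor's illustrative sketch (not part of this file): the hold/put
 * refcount idiom used by sctp_association_hold()/sctp_association_put(),
 * rendered with C11 atomics in userspace. Destruction runs exactly once,
 * when the last reference is dropped.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
};

static void obj_hold(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value; 1 means we held the last ref. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		printf("destroying\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcnt, 1);	/* creation reference */
	obj_hold(o);
	obj_put(o);	/* one reference remains */
	obj_put(o);	/* last reference: destroys */
	return 0;
}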
980
981 /* Allocate the next TSN, Transmission Sequence Number, for the given
982 * association.
983 */
984 __u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
985 {
986 /* From Section 1.6 Serial Number Arithmetic:
987 * Transmission Sequence Numbers wrap around when they reach
988 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
989 * after transmitting TSN = 2**32 - 1 is TSN = 0.
990 */
991 __u32 retval = asoc->next_tsn;
992 asoc->next_tsn++;
993 asoc->unack_data++;
994
995 return retval;
996 }
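/*
 * Editor's illustrative sketch (not part of this file): TSNs use serial
 * number arithmetic (RFC 4960 1.6), so allocation wraps modulo 2**32. A
 * plain unsigned 32-bit increment gives exactly that behaviour: the TSN
 * after 0xffffffff is 0.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t next_tsn = 0xffffffffu;
	uint32_t sent = next_tsn++;	/* allocate one TSN */

	printf("sent TSN %u, next TSN %u\n",
	       (unsigned int)sent, (unsigned int)next_tsn);	/* ..., 0 */
	return 0;
}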
997
998 /* Compare two addresses to see if they match. Wildcard addresses
999 * only match themselves.
1000 */
1001 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
1002 const union sctp_addr *ss2)
1003 {
1004 struct sctp_af *af;
1005
1006 af = sctp_get_af_specific(ss1->sa.sa_family);
1007 if (unlikely(!af))
1008 return 0;
1009
1010 return af->cmp_addr(ss1, ss2);
1011 }
1012
1013 /* Return an ecne chunk to get prepended to a packet.
1014 * Note: We are sly and return a shared, prealloced chunk. FIXME:
1015 * No we don't, but we could/should.
1016 */
1017 struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
1018 {
1019 struct sctp_chunk *chunk;
1020
1021 /* Send ECNE if needed.
1022 * Not being able to allocate a chunk here is not deadly.
1023 */
1024 if (asoc->need_ecne)
1025 chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
1026 else
1027 chunk = NULL;
1028
1029 return chunk;
1030 }
1031
1032 /*
1033 * Find which transport this TSN was sent on.
1034 */
1035 struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
1036 __u32 tsn)
1037 {
1038 struct sctp_transport *active;
1039 struct sctp_transport *match;
1040 struct sctp_transport *transport;
1041 struct sctp_chunk *chunk;
1042 __be32 key = htonl(tsn);
1043
1044 match = NULL;
1045
1046 /*
1047 * FIXME: In general, find a more efficient data structure for
1048 * searching.
1049 */
1050
1051 /*
1052 * The general strategy is to search each transport's transmitted
1053 * list. Return which transport this TSN lives on.
1054 *
1055 * Let's be hopeful and check the active_path first.
1056 * Another optimization would be to know if there is only one
1057 * outbound path and not have to look for the TSN at all.
1058 *
1059 */
1060
1061 active = asoc->peer.active_path;
1062
1063 list_for_each_entry(chunk, &active->transmitted,
1064 transmitted_list) {
1065
1066 if (key == chunk->subh.data_hdr->tsn) {
1067 match = active;
1068 goto out;
1069 }
1070 }
1071
1072 /* If not found, go search all the other transports. */
1073 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
1074 transports) {
1075
1076 if (transport == active)
1077 continue;
1078 list_for_each_entry(chunk, &transport->transmitted,
1079 transmitted_list) {
1080 if (key == chunk->subh.data_hdr->tsn) {
1081 match = transport;
1082 goto out;
1083 }
1084 }
1085 }
1086 out:
1087 return match;
1088 }
1089
1090 /* Is this the association we are looking for? */
1091 struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
1092 struct net *net,
1093 const union sctp_addr *laddr,
1094 const union sctp_addr *paddr)
1095 {
1096 struct sctp_transport *transport;
1097
1098 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
1099 (htons(asoc->peer.port) == paddr->v4.sin_port) &&
1100 net_eq(sock_net(asoc->base.sk), net)) {
1101 transport = sctp_assoc_lookup_paddr(asoc, paddr);
1102 if (!transport)
1103 goto out;
1104
1105 if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1106 sctp_sk(asoc->base.sk)))
1107 goto out;
1108 }
1109 transport = NULL;
1110
1111 out:
1112 return transport;
1113 }
1114
1115 /* Do delayed input processing. This is scheduled by sctp_rcv(). */
1116 static void sctp_assoc_bh_rcv(struct work_struct *work)
1117 {
1118 struct sctp_association *asoc =
1119 container_of(work, struct sctp_association,
1120 base.inqueue.immediate);
1121 struct sctp_endpoint *ep;
1122 struct sctp_chunk *chunk;
1123 struct sctp_inq *inqueue;
1124 int state;
1125 sctp_subtype_t subtype;
1126 int error = 0;
1127
1128 /* The association should be held so we should be safe. */
1129 ep = asoc->ep;
1130
1131 inqueue = &asoc->base.inqueue;
1132 sctp_association_hold(asoc);
1133 while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1134 state = asoc->state;
1135 subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1136
1137 /* SCTP-AUTH, Section 6.3:
1138 * The receiver has a list of chunk types which it expects
1139 * to be received only after an AUTH-chunk. This list has
1140 * been sent to the peer during the association setup. It
1141 * MUST silently discard these chunks if they are not placed
1142 * after an AUTH chunk in the packet.
1143 */
1144 if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1145 continue;
1146
1147 /* Remember where the last DATA chunk came from so we
1148 * know where to send the SACK.
1149 */
1150 if (sctp_chunk_is_data(chunk))
1151 asoc->peer.last_data_from = chunk->transport;
1152 else
1153 SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
1154
1155 if (chunk->transport)
1156 chunk->transport->last_time_heard = jiffies;
1157
1158 /* Run through the state machine. */
1159 error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
1160 state, ep, asoc, chunk, GFP_ATOMIC);
1161
1162 /* Check to see if the association is freed in response to
1163 * the incoming chunk. If so, get out of the while loop.
1164 */
1165 if (asoc->base.dead)
1166 break;
1167
1168 /* If there is an error on chunk, discard this packet. */
1169 if (error && chunk)
1170 chunk->pdiscard = 1;
1171 }
1172 sctp_association_put(asoc);
1173 }
1174
1175 /* This routine moves an association from its old sk to a new sk. */
1176 void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1177 {
1178 struct sctp_sock *newsp = sctp_sk(newsk);
1179 struct sock *oldsk = assoc->base.sk;
1180
1181 /* Delete the association from the old endpoint's list of
1182 * associations.
1183 */
1184 list_del_init(&assoc->asocs);
1185
1186 /* Decrement the backlog value for a TCP-style socket. */
1187 if (sctp_style(oldsk, TCP))
1188 oldsk->sk_ack_backlog--;
1189
1190 /* Release references to the old endpoint and the sock. */
1191 sctp_endpoint_put(assoc->ep);
1192 sock_put(assoc->base.sk);
1193
1194 /* Get a reference to the new endpoint. */
1195 assoc->ep = newsp->ep;
1196 sctp_endpoint_hold(assoc->ep);
1197
1198 /* Get a reference to the new sock. */
1199 assoc->base.sk = newsk;
1200 sock_hold(assoc->base.sk);
1201
1202 /* Add the association to the new endpoint's list of associations. */
1203 sctp_endpoint_add_asoc(newsp->ep, assoc);
1204 }
1205
1206 /* Update an association (possibly from unexpected COOKIE-ECHO processing). */
1207 void sctp_assoc_update(struct sctp_association *asoc,
1208 struct sctp_association *new)
1209 {
1210 struct sctp_transport *trans;
1211 struct list_head *pos, *temp;
1212
1213 /* Copy in new parameters of peer. */
1214 asoc->c = new->c;
1215 asoc->peer.rwnd = new->peer.rwnd;
1216 asoc->peer.sack_needed = new->peer.sack_needed;
1217 asoc->peer.i = new->peer.i;
1218 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1219 asoc->peer.i.initial_tsn, GFP_ATOMIC);
1220
1221 /* Remove any peer addresses not present in the new association. */
1222 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1223 trans = list_entry(pos, struct sctp_transport, transports);
1224 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1225 sctp_assoc_rm_peer(asoc, trans);
1226 continue;
1227 }
1228
1229 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1230 sctp_transport_reset(trans);
1231 }
1232
1233 /* If the case is A (association restart), use
1234 * initial_tsn as next_tsn. If the case is B (a setup collision), use
1235 * current next_tsn in case data sent to peer
1236 * has been discarded and needs retransmission.
1237 */
1238 if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1239 asoc->next_tsn = new->next_tsn;
1240 asoc->ctsn_ack_point = new->ctsn_ack_point;
1241 asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1242
1243 /* Reinitialize SSN for both local streams
1244 * and peer's streams.
1245 */
1246 sctp_ssnmap_clear(asoc->ssnmap);
1247
1248 /* Flush the ULP reassembly and ordered queue.
1249 * Any data there will now be stale and will
1250 * cause problems.
1251 */
1252 sctp_ulpq_flush(&asoc->ulpq);
1253
1254 /* reset the overall association error count so
1255 * that the restarted association doesn't get torn
1256 * down on the next retransmission timer.
1257 */
1258 asoc->overall_error_count = 0;
1259
1260 } else {
1261 /* Add any peer addresses from the new association. */
1262 list_for_each_entry(trans, &new->peer.transport_addr_list,
1263 transports) {
1264 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1265 sctp_assoc_add_peer(asoc, &trans->ipaddr,
1266 GFP_ATOMIC, trans->state);
1267 }
1268
1269 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1270 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1271 if (!asoc->ssnmap) {
1272 /* Move the ssnmap. */
1273 asoc->ssnmap = new->ssnmap;
1274 new->ssnmap = NULL;
1275 }
1276
1277 if (!asoc->assoc_id) {
1278 /* get a new association id since we don't have one
1279 * yet.
1280 */
1281 sctp_assoc_set_id(asoc, GFP_ATOMIC);
1282 }
1283 }
1284
1285 /* SCTP-AUTH: Save the peer parameters from the new association
1286 * and also move the association shared keys over.
1287 */
1288 kfree(asoc->peer.peer_random);
1289 asoc->peer.peer_random = new->peer.peer_random;
1290 new->peer.peer_random = NULL;
1291
1292 kfree(asoc->peer.peer_chunks);
1293 asoc->peer.peer_chunks = new->peer.peer_chunks;
1294 new->peer.peer_chunks = NULL;
1295
1296 kfree(asoc->peer.peer_hmacs);
1297 asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1298 new->peer.peer_hmacs = NULL;
1299
1300 sctp_auth_key_put(asoc->asoc_shared_key);
1301 sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1302 }
1303
1304 /* Update the retran path for sending a retransmitted packet.
1305 * Round-robin through the active transports, else round-robin
1306 * through the inactive transports as this is the next best thing
1307 * we can try.
1308 */
1309 void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1310 {
1311 struct sctp_transport *t, *next;
1312 struct list_head *head = &asoc->peer.transport_addr_list;
1313 struct list_head *pos;
1314
1315 if (asoc->peer.transport_count == 1)
1316 return;
1317
1318 /* Find the next transport in a round-robin fashion. */
1319 t = asoc->peer.retran_path;
1320 pos = &t->transports;
1321 next = NULL;
1322
1323 while (1) {
1324 /* Skip the head. */
1325 if (pos->next == head)
1326 pos = head->next;
1327 else
1328 pos = pos->next;
1329
1330 t = list_entry(pos, struct sctp_transport, transports);
1331
1332 /* We have exhausted the list without finding any
1333 * other active transports; fall back to the next
1334 * usable transport we remembered, if any.
1335 */
1336 if (t == asoc->peer.retran_path) {
1337 t = next;
1338 break;
1339 }
1340
1341 /* Try to find an active transport. */
1342
1343 if ((t->state == SCTP_ACTIVE) ||
1344 (t->state == SCTP_UNKNOWN)) {
1345 break;
1346 } else {
1347 /* Keep track of the next transport in case
1348 * we don't find any active transport.
1349 */
1350 if (t->state != SCTP_UNCONFIRMED && !next)
1351 next = t;
1352 }
1353 }
1354
1355 if (t)
1356 asoc->peer.retran_path = t;
1357 else
1358 t = asoc->peer.retran_path;
1359
1360 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
1361 " %p addr: ",
1362 " port: %d\n",
1363 asoc,
1364 (&t->ipaddr),
1365 ntohs(t->ipaddr.v4.sin_port));
1366 }
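/*
 * Editor's illustrative sketch (not part of this file): the round-robin
 * walk of sctp_assoc_update_retran_path() on a plain array. Starting just
 * past the current retran path, take the first ACTIVE/UNKNOWN transport;
 * remember the first non-UNCONFIRMED one as a fallback in case a full
 * cycle finds nothing active.
 */
#include <stdio.h>

enum tstate { T_ACTIVE, T_INACTIVE, T_UNCONFIRMED };

int main(void)
{
	enum tstate t[] = { T_INACTIVE, T_ACTIVE, T_INACTIVE, T_INACTIVE };
	unsigned int n = sizeof(t) / sizeof(t[0]);
	unsigned int retran = 2;	/* index of current retran path */
	int next = -1, pick = -1;

	for (unsigned int i = 1; i <= n; i++) {
		unsigned int idx = (retran + i) % n;

		if (idx == retran)
			break;	/* full cycle, nothing active found */
		if (t[idx] == T_ACTIVE) {
			pick = idx;
			break;
		}
		if (t[idx] != T_UNCONFIRMED && next < 0)
			next = idx;	/* fallback candidate */
	}
	if (pick < 0)
		pick = next;
	printf("new retran path: %d\n", pick);	/* 1 */
	return 0;
}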
1367
1368 /* Choose the transport for sending retransmit packet. */
1369 struct sctp_transport *sctp_assoc_choose_alter_transport(
1370 struct sctp_association *asoc, struct sctp_transport *last_sent_to)
1371 {
1372 /* If this is the first time packet is sent, use the active path,
1373 * else use the retran path. If the last packet was sent over the
1374 * retran path, update the retran path and use it.
1375 */
1376 if (!last_sent_to)
1377 return asoc->peer.active_path;
1378 else {
1379 if (last_sent_to == asoc->peer.retran_path)
1380 sctp_assoc_update_retran_path(asoc);
1381 return asoc->peer.retran_path;
1382 }
1383 }
1384
1385 /* Update the association's pmtu and frag_point by going through all the
1386 * transports. This routine is called when a transport's PMTU has changed.
1387 */
1388 void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1389 {
1390 struct sctp_transport *t;
1391 __u32 pmtu = 0;
1392
1393 if (!asoc)
1394 return;
1395
1396 /* Get the lowest pmtu of all the transports. */
1397 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1398 transports) {
1399 if (t->pmtu_pending && t->dst) {
1400 sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1401 t->pmtu_pending = 0;
1402 }
1403 if (!pmtu || (t->pathmtu < pmtu))
1404 pmtu = t->pathmtu;
1405 }
1406
1407 if (pmtu) {
1408 asoc->pathmtu = pmtu;
1409 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1410 }
1411
1412 SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
1413 __func__, asoc, asoc->pathmtu, asoc->frag_point);
1414 }
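/*
 * Editor's illustrative sketch (not part of this file): the association
 * PMTU is the minimum over all transport PMTUs, and frag_point follows
 * from it. The fixed overhead below is an assumption standing in for
 * sctp_frag_point(), which accounts for the real SCTP/IP headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pathmtu[] = { 1500, 1400, 9000 };
	unsigned int pmtu = 0;
	unsigned int overhead = 48;	/* illustrative header allowance */

	for (unsigned int i = 0; i < 3; i++)
		if (!pmtu || pathmtu[i] < pmtu)
			pmtu = pathmtu[i];

	printf("asoc pmtu %u, frag point ~%u\n", pmtu, pmtu - overhead);
	return 0;
}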
1415
1416 /* Should we send a SACK to update our peer? */
1417 static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1418 {
1419 switch (asoc->state) {
1420 case SCTP_STATE_ESTABLISHED:
1421 case SCTP_STATE_SHUTDOWN_PENDING:
1422 case SCTP_STATE_SHUTDOWN_RECEIVED:
1423 case SCTP_STATE_SHUTDOWN_SENT:
1424 if ((asoc->rwnd > asoc->a_rwnd) &&
1425 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1426 (asoc->base.sk->sk_rcvbuf >> sctp_rwnd_upd_shift),
1427 asoc->pathmtu)))
1428 return 1;
1429 break;
1430 default:
1431 break;
1432 }
1433 return 0;
1434 }
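/*
 * Editor's illustrative sketch (not part of this file): the window-update
 * test in sctp_peer_needs_update(). A SACK is worth sending once rwnd has
 * grown past the last advertised window by at least
 * max(sk_rcvbuf >> shift, pathmtu), in the spirit of RFC 1122 4.2.3.3.
 * The shift value below is an assumption.
 */
#include <stdio.h>

static int peer_needs_update(unsigned int rwnd, unsigned int a_rwnd,
			     unsigned int rcvbuf, unsigned int pathmtu,
			     unsigned int shift)
{
	unsigned int min_gain = rcvbuf >> shift;

	if (min_gain < pathmtu)
		min_gain = pathmtu;
	return rwnd > a_rwnd && rwnd - a_rwnd >= min_gain;
}

int main(void)
{
	/* 128000 >> 4 = 8000, so a 10000-byte gain triggers an update... */
	printf("%d\n", peer_needs_update(70000, 60000, 128000, 1500, 4));
	/* ...but a 1000-byte gain does not. */
	printf("%d\n", peer_needs_update(61000, 60000, 128000, 1500, 4));
	return 0;
}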
1435
1436 /* Increase asoc's rwnd by len and send any window update SACK if needed. */
1437 void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1438 {
1439 struct sctp_chunk *sack;
1440 struct timer_list *timer;
1441
1442 if (asoc->rwnd_over) {
1443 if (asoc->rwnd_over >= len) {
1444 asoc->rwnd_over -= len;
1445 } else {
1446 asoc->rwnd += (len - asoc->rwnd_over);
1447 asoc->rwnd_over = 0;
1448 }
1449 } else {
1450 asoc->rwnd += len;
1451 }
1452
1453 /* If we had window pressure, start recovering it
1454 * once our rwnd has reached the accumulated pressure
1455 * threshold. The idea is to recover slowly, but up
1456 * to the initial advertised window.
1457 */
1458 if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1459 int change = min(asoc->pathmtu, asoc->rwnd_press);
1460 asoc->rwnd += change;
1461 asoc->rwnd_press -= change;
1462 }
1463
1464 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
1465 "- %u\n", __func__, asoc, len, asoc->rwnd,
1466 asoc->rwnd_over, asoc->a_rwnd);
1467
1468 /* Send a window update SACK if the rwnd has increased by at least the
1469 * minimum of the association's PMTU and half of the receive buffer.
1470 * The algorithm used is similar to the one described in
1471 * Section 4.2.3.3 of RFC 1122.
1472 */
1473 if (sctp_peer_needs_update(asoc)) {
1474 asoc->a_rwnd = asoc->rwnd;
1475 SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
1476 "rwnd: %u a_rwnd: %u\n", __func__,
1477 asoc, asoc->rwnd, asoc->a_rwnd);
1478 sack = sctp_make_sack(asoc);
1479 if (!sack)
1480 return;
1481
1482 asoc->peer.sack_needed = 0;
1483
1484 sctp_outq_tail(&asoc->outqueue, sack);
1485
1486 /* Stop the SACK timer. */
1487 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1488 if (timer_pending(timer) && del_timer(timer))
1489 sctp_association_put(asoc);
1490 }
1491 }
1492
1493 /* Decrease asoc's rwnd by len. */
1494 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1495 {
1496 int rx_count;
1497 int over = 0;
1498
1499 SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
1500 SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
1501
1502 if (asoc->ep->rcvbuf_policy)
1503 rx_count = atomic_read(&asoc->rmem_alloc);
1504 else
1505 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1506
1507 /* If we've reached or overflowed our receive buffer, announce
1508 * a 0 rwnd if rwnd would still be positive. Store the
1509 * potential pressure overflow so that the window can be restored
1510 * back to its original value.
1511 */
1512 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1513 over = 1;
1514
1515 if (asoc->rwnd >= len) {
1516 asoc->rwnd -= len;
1517 if (over) {
1518 asoc->rwnd_press += asoc->rwnd;
1519 asoc->rwnd = 0;
1520 }
1521 } else {
1522 asoc->rwnd_over = len - asoc->rwnd;
1523 asoc->rwnd = 0;
1524 }
1525 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
1526 __func__, asoc, len, asoc->rwnd,
1527 asoc->rwnd_over, asoc->rwnd_press);
1528 }
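/*
 * Editor's illustrative sketch (not part of this file): the rwnd_over
 * bookkeeping shared by the increase/decrease routines above (the
 * rwnd_press pressure path is omitted). A decrease past zero is parked in
 * rwnd_over; a later increase repays rwnd_over before growing rwnd again.
 */
#include <stdio.h>

struct win {
	unsigned int rwnd;
	unsigned int rwnd_over;
};

static void rwnd_decrease(struct win *w, unsigned int len)
{
	if (w->rwnd >= len) {
		w->rwnd -= len;
	} else {
		w->rwnd_over = len - w->rwnd;
		w->rwnd = 0;
	}
}

static void rwnd_increase(struct win *w, unsigned int len)
{
	if (w->rwnd_over >= len) {
		w->rwnd_over -= len;
	} else {
		w->rwnd += len - w->rwnd_over;
		w->rwnd_over = 0;
	}
}

int main(void)
{
	struct win w = { 1000, 0 };

	rwnd_decrease(&w, 1500);	/* rwnd 0, over 500 */
	rwnd_increase(&w, 1500);	/* repays 500, rwnd back to 1000 */
	printf("rwnd=%u over=%u\n", w.rwnd, w.rwnd_over);
	return 0;
}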
1529
1530 /* Build the bind address list for the association based on info from the
1531 * local endpoint and the remote peer.
1532 */
1533 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1534 sctp_scope_t scope, gfp_t gfp)
1535 {
1536 int flags;
1537
1538 /* Use scoping rules to determine the subset of addresses from
1539 * the endpoint.
1540 */
1541 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1542 if (asoc->peer.ipv4_address)
1543 flags |= SCTP_ADDR4_PEERSUPP;
1544 if (asoc->peer.ipv6_address)
1545 flags |= SCTP_ADDR6_PEERSUPP;
1546
1547 return sctp_bind_addr_copy(&asoc->base.bind_addr,
1548 &asoc->ep->base.bind_addr,
1549 scope, gfp, flags);
1550 }
1551
1552 /* Build the association's bind address list from the cookie. */
1553 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1554 struct sctp_cookie *cookie,
1555 gfp_t gfp)
1556 {
1557 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1558 int var_size3 = cookie->raw_addr_list_len;
1559 __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1560
1561 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1562 asoc->ep->base.bind_addr.port, gfp);
1563 }
1564
1565 /* Lookup laddr in the bind address list of an association. */
1566 int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1567 const union sctp_addr *laddr)
1568 {
1569 int found = 0;
1570
1571 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1572 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1573 sctp_sk(asoc->base.sk)))
1574 found = 1;
1575
1576 return found;
1577 }
1578
1579 /* Set an association id for a given association */
1580 int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1581 {
1582 int assoc_id;
1583 int error = 0;
1584
1585 /* If the id is already assigned, keep it. */
1586 if (asoc->assoc_id)
1587 return error;
1588 retry:
1589 if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
1590 return -ENOMEM;
1591
1592 spin_lock_bh(&sctp_assocs_id_lock);
1593 error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
1594 idr_low, &assoc_id);
1595 if (!error) {
1596 idr_low = assoc_id + 1;
1597 if (idr_low == INT_MAX)
1598 idr_low = 1;
1599 }
1600 spin_unlock_bh(&sctp_assocs_id_lock);
1601 if (error == -EAGAIN)
1602 goto retry;
1603 else if (error)
1604 return error;
1605
1606 asoc->assoc_id = (sctp_assoc_t) assoc_id;
1607 return error;
1608 }
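/*
 * Editor's illustrative sketch (not part of this file): the idr_low
 * watermark scheme used by sctp_assoc_set_id(), modelled with a plain
 * counter. Ids grow monotonically from the previous grant so recently
 * freed ids are not reused right away, and the watermark wraps back to 1
 * before reaching INT_MAX. A real allocator (the idr) also skips ids
 * still in use.
 */
#include <limits.h>
#include <stdio.h>

static int idr_low = 1;

static int alloc_assoc_id(void)
{
	int id = idr_low;

	idr_low = id + 1;
	if (idr_low == INT_MAX)
		idr_low = 1;
	return id;
}

int main(void)
{
	int a = alloc_assoc_id();
	int b = alloc_assoc_id();

	printf("%d %d\n", a, b);	/* 1 2 */
	return 0;
}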
1609
1610 /* Free the ASCONF queue */
1611 static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1612 {
1613 struct sctp_chunk *asconf;
1614 struct sctp_chunk *tmp;
1615
1616 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1617 list_del_init(&asconf->list);
1618 sctp_chunk_free(asconf);
1619 }
1620 }
1621
1622 /* Free asconf_ack cache */
1623 static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1624 {
1625 struct sctp_chunk *ack;
1626 struct sctp_chunk *tmp;
1627
1628 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1629 transmitted_list) {
1630 list_del_init(&ack->transmitted_list);
1631 sctp_chunk_free(ack);
1632 }
1633 }
1634
1635 /* Clean up the ASCONF_ACK queue */
1636 void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1637 {
1638 struct sctp_chunk *ack;
1639 struct sctp_chunk *tmp;
1640
1641 /* We can remove all the entries from the queue up to
1642 * the "Peer-Sequence-Number".
1643 */
1644 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1645 transmitted_list) {
1646 if (ack->subh.addip_hdr->serial ==
1647 htonl(asoc->peer.addip_serial))
1648 break;
1649
1650 list_del_init(&ack->transmitted_list);
1651 sctp_chunk_free(ack);
1652 }
1653 }
1654
1655 /* Find the ASCONF_ACK whose serial number matches ASCONF */
1656 struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1657 const struct sctp_association *asoc,
1658 __be32 serial)
1659 {
1660 struct sctp_chunk *ack;
1661
1662 /* Walk through the list of cached ASCONF-ACKs and find the
1663 * ack chunk whose serial number matches that of the request.
1664 */
1665 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1666 if (ack->subh.addip_hdr->serial == serial) {
1667 sctp_chunk_hold(ack);
1668 return ack;
1669 }
1670 }
1671
1672 return NULL;
1673 }
1674
1675 void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1676 {
1677 /* Free any cached ASCONF_ACK chunk. */
1678 sctp_assoc_free_asconf_acks(asoc);
1679
1680 /* Free the ASCONF queue. */
1681 sctp_assoc_free_asconf_queue(asoc);
1682
1683 /* Free any cached ASCONF chunk. */
1684 if (asoc->addip_last_asconf)
1685 sctp_chunk_free(asoc->addip_last_asconf);
1686 }