/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@austin.ibm.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Dajiang Zhang <dajiang.zhang@nokia.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp);
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp);

static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t);
/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk. */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}
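
/* For illustration, the ECNE handling above reduces to this sketch
 * (TSN_lt() is the serial-number comparison used throughout lksctp):
 *
 *	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn))
 *		lower cwnd once, then last_cwr_tsn = lowest_tsn;
 *	always reply with CWR(asoc->last_cwr_tsn);
 *
 * A duplicate or reordered ECNE carrying an older TSN therefore only
 * re-triggers the CWR reply, never a second cwnd reduction.
 */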

/* Helper function to do delayed processing of ECN CWR chunk. */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet. */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	struct sctp_transport *trans = asoc->peer.last_data_from;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}
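
/* A worked example of the delayed-SACK accounting above, assuming the
 * RFC 4960 defaults (sackfreq == 2, sackdelay == 200ms) and in-order
 * arrival:
 *
 *	packet 1: sack_cnt 0 -> 1; 1 >= sackfreq - 1, so the next
 *		  packet must be SACKed; the 200ms timer is restarted
 *	packet 2: sack_needed is set, a SACK is built, the timer stops
 *
 * Out-of-order DATA (max_tsn_seen != ctsn) bypasses the delay
 * entirely, as required by section 6.2.
 */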

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	/* Check whether a task is in the sock. */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}
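
/* The rescheduling idiom above recurs in every timer handler in this
 * file and is worth spelling out:
 *
 *	if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
 *		sctp_transport_hold(transport);
 *
 * mod_timer() returns 0 when the timer was not already pending, i.e.
 * this call armed a fresh expiration, so a new reference is taken; the
 * sctp_transport_put() at the end of the handler drops the reference
 * belonging to the expiration that just fired.  HZ/20 is simply a
 * 50ms "socket is busy, retry" delay.
 */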

/* This is a common interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					sctp_event_timeout_t timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later. */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

static void sctp_generate_t1_cookie_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *)data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(unsigned long data)
{
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}


/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(unsigned long data)
{
	struct sctp_association *asoc = (struct sctp_association *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t4_rto_event,
	sctp_generate_t5_shutdown_guard_event,
	NULL,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};
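
/* This table is indexed by sctp_event_timeout_t; the NULL slots are
 * the timeouts that are armed per transport rather than per
 * association (T3-RTX, HEARTBEAT) or that need no handler.  The core
 * is expected to arm these timers along the lines of (a sketch of the
 * setup done at association init time, not code from this file):
 *
 *	setup_timer(&asoc->timers[i], sctp_timer_events[i],
 *		    (unsigned long)asoc);
 *
 * which is why each handler above starts by casting its argument back
 * to a struct sctp_association pointer.
 */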


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within an RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	struct net *net = sock_net(asoc->base.sk);

	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (net->sctp.pf_enable &&
	    (transport->state == SCTP_ACTIVE) &&
	    (asoc->pf_retrans < transport->pathmaxrxt) &&
	    (transport->error_count > asoc->pf_retrans)) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_cmd_hb_timer_update(commands, transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}
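
/* As a concrete example of the thresholds above, take pf_retrans == 2
 * and pathmaxrxt == 5 on an ACTIVE transport: strikes 1-2 leave it
 * ACTIVE; strike 3 (error_count > pf_retrans) moves it to PF and
 * switches heartbeats to every RTO; strike 6 (error_count >
 * pathmaxrxt) marks it INACTIVE and raises the DOWN notification.
 * In parallel, rule E2 doubles the RTO on each strike, e.g.
 * 1s -> 2s -> 4s -> 8s, capped at rto_max.
 */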

/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;
	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie).  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 sctp_init_chunk_t *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers. */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer. */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of a HEARTBEAT ACK. */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	sctp_sender_hb_info_t *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING.  If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shut down the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		dst_confirm(t->dst);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer. */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}
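
/* The RTT sample fed to sctp_transport_update_rto() above is just the
 * jiffies delta since the HEARTBEAT was stamped; with HZ == 1000, for
 * example:
 *
 *	hbinfo->sent_at == 105000, jiffies == 105120
 *	=> rtt sample of 120 jiffies (120ms)
 *
 * which the transport code folds into SRTT/RTTVAR following the
 * estimator of RFC 4960 section 6.3.1.
 */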


/* Helper function to process the SACK command. */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		struct net *net = sock_net(asoc->base.sk);

		/* There are no more TSNs awaiting SACK. */
		err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
			       struct sctp_association *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED))
			sk->sk_shutdown |= RCV_SHUTDOWN;
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}
}
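
/* Tying this to the socket API: on a TCP-style socket a blocking
 * connect() sleeps in sctp_wait_for_connect() until the handshake
 * drives the association here with state == SCTP_STATE_ESTABLISHED,
 * at which point the wake_up_interruptible() above releases it:
 *
 *	connect(fd, ...)  ->  INIT / INIT-ACK / COOKIE-ECHO / COOKIE-ACK
 *	  ->  sctp_cmd_new_state(..., SCTP_STATE_ESTABLISHED)
 *	  ->  wake_up_interruptible(&asoc->wait), connect() returns 0
 */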

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		sctp_ulpq_tail_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			sctp_chunkhdr_t *unk_chunk_hdr;

			unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Process variable FWDTSN chunk information. */
static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
				    struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk) {
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;
	struct list_head *temp;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		sctp_ulpq_tail_event(&asoc->ulpq, ev);
}


static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     sctp_event_timeout_t timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}
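
/* A worked example of the doubling rule above, assuming two peer
 * transports and rto_initial == 3s: the INIT (or COOKIE-ECHO) is
 * first retried once per transport at 3s each; only once every
 * transport has been tried at the current timeout
 * (init_sent_count > init_cycle + 1) does a cycle complete and the
 * timeout double to 6s, then 12s, capped at max_init_timeo.  This
 * keeps multihomed endpoints from backing off before every path has
 * been probed at the current timeout.
 */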

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static int sctp_cmd_send_msg(struct sctp_association *asoc,
			     struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;
	int error = 0;

	list_for_each_entry(chunk, &msg->chunks, frag_list) {
		error = sctp_outq_tail(&asoc->outqueue, chunk, gfp);
		if (error)
			break;
	}

	return error;
}


/* Send the next ASCONF packet currently stored in the association.
 * This happens after the ASCONF_ACK was successfully processed.
 */
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);

	/* Send the next asconf chunk from the addip chunk
	 * queue.
	 */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;
		struct sctp_chunk *asconf = list_entry(entry,
						       struct sctp_chunk, list);
		list_del_init(entry);

		/* Hold the chunk until an ASCONF_ACK is received. */
		sctp_chunk_hold(asconf);
		if (sctp_primitive_ASCONF(net, asoc, asconf))
			sctp_chunk_free(asconf);
		else
			asoc->addip_last_asconf = asconf;
	}
}
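
/* ADDIP allows only one ASCONF in flight per association, so queued
 * ASCONFs drain strictly one at a time:
 *
 *	ASCONF #1 sent ... ASCONF-ACK #1 processed
 *	  -> SCTP_CMD_SEND_NEXT_ASCONF -> sctp_cmd_send_asconf()
 *	  -> ASCONF #2 dequeued, sent, parked in addip_last_asconf
 *
 * The extra sctp_chunk_hold() keeps the chunk alive for T-4
 * retransmission until its own ACK arrives.
 */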


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
	       struct sctp_endpoint *ep,
	       struct sctp_association *asoc,
	       void *event_arg,
	       gfp_t gfp)
{
	sctp_cmd_seq_t commands;
	const sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}
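
/* Every entry point into the state machine funnels through this
 * function; the timer handlers earlier in this file are representative
 * callers, e.g. (mirroring sctp_generate_t3_rtx_event() above):
 *
 *	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 *			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
 *			   asoc->state, asoc->ep, asoc,
 *			   transport, GFP_ATOMIC);
 *
 * The (event_type, state, subtype) triple selects one cell of the
 * state table; the chosen state function does most of its work by
 * appending commands that sctp_side_effects() then interprets.
 */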

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(sctp_event_t event_type,
				sctp_subtype_t subtype,
				sctp_state_t state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
				gfp_t gfp)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	struct sctp_chunk *new_obj;
	struct sctp_chunk *chunk = NULL;
	struct sctp_packet *packet;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	struct sctp_sackhdr sackh;
	int local_cork = 0;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *	while (cmds)
	 *		cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint. */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association. */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			/* Move the Cumulative TSN Ack ahead. */
			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);

			/* purge the fragmentation queue */
			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);

			/* Abort any in progress partial delivery. */
			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enabling COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer. */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk,
					       gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing. */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing. */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			/* fall through */

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
					    SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.err);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = asoc->peer.rwnd +
					asoc->outqueue.outstanding_bytes;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending.
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
					 GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			error = sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			error = sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_SEND_NEXT_ASCONF:
			sctp_cmd_send_asconf(asoc);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error)
			break;
	}

out:
	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			error = sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		error = sctp_outq_uncork(&asoc->outqueue, gfp);
	return error;
nomem:
	error = -ENOMEM;
	goto out;
}
