net/sctp/transport.c
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
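	/* (The per-netns tunable behind this, sysctl net.sctp.rto_initial,
	 * defaults to 3000 ms, matching the RFC 4960 recommendation of
	 * RTO.Initial = 3 seconds.)
	 */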

	peer->last_time_heard = jiffies;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt = net->sctp.max_retrans_path;
	peer->pf_retrans = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
		    (unsigned long)peer);
	setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
		    (unsigned long)peer);
	setup_timer(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, (unsigned long)peer);

	/* Initialize the 64-bit random nonce sent with heartbeat.  */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	atomic_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}
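
/* Typical construction sequence (an illustrative sketch rather than a
 * verbatim in-tree caller; sctp_association_add_peer() in associola.c
 * wraps these same steps with additional peer setup):
 *
 *	transport = sctp_transport_new(net, &peer_addr, gfp);
 *	if (!transport)
 *		return NULL;
 *	sctp_transport_set_owner(transport, asoc);
 *	sctp_transport_route(transport, NULL, sctp_sk(asoc->base.sk));
 */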

/* This transport is no longer needed.  Free up if possible, or
 * delay until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	transport->dead = 1;

	/* Try to delete the heartbeat timer.  */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting
	 * the structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active.  */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(!transport->dead)) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);

	/* When a data chunk is sent, reset the heartbeat interval.  */
	if (!mod_timer(&transport->hb_timer,
		       sctp_transport_timeout(transport)))
		sctp_transport_hold(transport);
}
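
/* Reference-count invariant worth noting (descriptive, derived from the
 * code above and from sctp_transport_free()): a pending timer owns one
 * reference to the transport.  mod_timer() returns 0 when the timer was
 * not previously pending, i.e. a new owner appeared, which is exactly the
 * case in which sctp_transport_hold() is called here; conversely,
 * del_timer() returning nonzero means a pending timer was cancelled, so
 * its reference is dropped with sctp_transport_put().
 */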

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		dst_release(transport->dst);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->dst) {
		transport->pathmtu = dst_mtu(transport->dst);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
			__func__, pmtu,
			SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment size and disable
		 * pmtu discovery on this transport.
		 */
		t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
	} else {
		t->pathmtu = pmtu;
	}

	dst = sctp_transport_dst_check(t);
	if (!dst)
		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);

	if (dst) {
		dst->ops->update_pmtu(dst, sk, NULL, pmtu);

		dst = sctp_transport_dst_check(t);
		if (!dst)
			t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
	}
}
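
/* A note on the repeated dst checks above (descriptive): update_pmtu()
 * may invalidate the cached route, so the dst is re-validated with
 * sctp_transport_dst_check() and, if it went away, looked up again with
 * get_dst() both before and after the PMTU update.
 */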

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
		return;
	}
	if (transport->dst) {
		transport->pathmtu = dst_mtu(transport->dst);

		/* Initialize sk->sk_rcv_saddr, if the transport is the
		 * association's active path for getsockname().
		 */
		if (asoc && (!asoc->peer.primary_path ||
			     (transport == asoc->peer.active_path)))
			opt->pf->af->to_sk_saddr(&transport->saddr,
						 asoc->base.sk);
	} else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
void sctp_transport_hold(struct sctp_transport *transport)
{
	atomic_inc(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (atomic_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set.  */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = sock_net(tp->asoc->base.sk);
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note:  The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
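		/* Worked example (illustrative numbers, assuming the default
		 * tunables rto_alpha = 3, i.e. RTO.Alpha = 1/8, and
		 * rto_beta = 2, i.e. RTO.Beta = 1/4): with SRTT = 800,
		 * RTTVAR = 100 and a new measurement rtt = 1000, all in
		 * jiffies,
		 *
		 *	RTTVAR <- 100 - (100 >> 2) + (|800 - 1000| >> 2)
		 *	        = 100 - 25 + 50 = 125
		 *	SRTT   <- 800 - (800 >> 3) + (1000 >> 3)
		 *	        = 800 - 100 + 125 = 825
		 *
		 * so the RTO computed below becomes 825 + (125 << 2) = 1325.
		 */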
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	/* The appropriate cwnd increase algorithm is performed if, and only
	 * if the cumulative TSN would advance and the congestion window is
	 * being fully utilized.
	 */
	if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
	    (flight_size < cwnd))
		return;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
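		/* Illustrative consequence of that bound: a SACK that newly
		 * acks 4380 bytes over a 1500-byte path MTU grows cwnd by
		 * only 1500, so a receiver splitting its acknowledgements
		 * cannot inflate cwnd faster than one MTU per SACK.
		 */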
		if (asoc->fast_recovery)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival that advances the Cumulative TSN Ack
		 * Point, increase partial_bytes_acked by the total number of
		 * bytes of all new chunks acknowledged in that SACK including
		 * chunks acknowledged by the new Cumulative TSN Ack and by
		 * Gap Ack Blocks.
		 *
		 * When partial_bytes_acked is equal to or greater than cwnd
		 * and before the arrival of the SACK the sender had cwnd or
		 * more bytes of data outstanding (i.e., before arrival of the
		 * SACK, flightsize was greater than or equal to cwnd),
		 * increase cwnd by MTU, and reset partial_bytes_acked to
		 * (partial_bytes_acked - cwnd).
		 */
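		/* Worked example (illustrative numbers): with cwnd = 6000,
		 * pmtu = 1500 and pba = 5000, a SACK acking 2000 new bytes
		 * gives pba = 7000 >= cwnd, so cwnd grows to 7500; pba is
		 * then smaller than the new cwnd and resets to 0 instead of
		 * underflowing.
		 */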
		pba += bytes_acked;
		if (pba >= cwnd) {
			cwnd += pmtu;
			pba = ((cwnd < pba) ? (pba - cwnd) : 0);
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       sctp_lower_cwnd_t reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
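		/* E.g. (illustrative): cwnd = 12000 with a 1500-byte MTU
		 * collapses to ssthresh = max(6000, 6000) = 6000 and
		 * cwnd = 1500, a single MTU.
		 */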
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
		 * losses from SACK (see Section 7.2.4), an endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
				      4*asoc->pathmtu);
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}
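
/* Illustrative numbers: with flight_size = 3000, Max.Burst = 4 and a
 * 1500-byte path MTU, at most 3000 + 6000 = 9000 bytes may be in flight,
 * so a cwnd of 12000 is clamped to 9000 and the original value is parked
 * in burst_limited until sctp_transport_burst_reset() restores it.
 */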

/* Restore the old cwnd congestion window, after the burst
 * has had its desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
	unsigned long timeout;
	timeout = t->rto + sctp_jitter(t->rto);
	if ((t->state != SCTP_UNCONFIRMED) &&
	    (t->state != SCTP_PF))
		timeout += t->hbinterval;
	timeout += jiffies;
	return timeout;
}
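
/* Descriptive note: the random jitter keeps heartbeats from synchronizing
 * across transports, and the hbinterval term is skipped for UNCONFIRMED
 * and PF (potentially failed) transports, so those are probed again after
 * roughly one RTO rather than a full heartbeat interval.
 */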

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
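	/* (E.g. with pathmtu = 1500 this yields min(6000, 4380) = 4380
	 * bytes, the RFC 4960 7.2.1 initial window.)
	 */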
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean
	 * slate.
	 */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
	return;
}