/*
 * dccp ccid-2: Simplify dec_pipe and rearming of RTO timer
 * (from deliverable/linux.git: net/dccp/ccids/ccid2.c)
 */
1 /*
2 * net/dccp/ccids/ccid2.c
3 *
4 * Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
5 *
6 * Changes to meet Linux coding standards, and DCCP infrastructure fixes.
7 *
8 * Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25 /*
26 * This implementation should follow RFC 4341
27 */
28 #include "../feat.h"
29 #include "../ccid.h"
30 #include "../dccp.h"
31 #include "ccid2.h"
32
33
34 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
35 static int ccid2_debug;
36 #define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
37 #else
38 #define ccid2_pr_debug(format, a...)
39 #endif
40
41 static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
42 {
43 struct ccid2_seq *seqp;
44 int i;
45
46 /* check if we have space to preserve the pointer to the buffer */
47 if (hctx->seqbufc >= sizeof(hctx->seqbuf) / sizeof(struct ccid2_seq *))
48 return -ENOMEM;
49
50 /* allocate buffer and initialize linked list */
51 seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());
52 if (seqp == NULL)
53 return -ENOMEM;
54
55 for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
56 seqp[i].ccid2s_next = &seqp[i + 1];
57 seqp[i + 1].ccid2s_prev = &seqp[i];
58 }
59 seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
60 seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
61
62 /* This is the first allocation. Initiate the head and tail. */
63 if (hctx->seqbufc == 0)
64 hctx->seqh = hctx->seqt = seqp;
65 else {
66 /* link the existing list with the one we just created */
67 hctx->seqh->ccid2s_next = seqp;
68 seqp->ccid2s_prev = hctx->seqh;
69
70 hctx->seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
71 seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->seqt;
72 }
73
74 /* store the original pointer to the buffer so we can free it */
75 hctx->seqbuf[hctx->seqbufc] = seqp;
76 hctx->seqbufc++;
77
78 return 0;
79 }
80
81 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
82 {
83 if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
84 return CCID_PACKET_WILL_DEQUEUE_LATER;
85 return CCID_PACKET_SEND_AT_ONCE;
86 }
87
88 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
89 {
90 struct dccp_sock *dp = dccp_sk(sk);
91 u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->cwnd, 2);
92
93 /*
94 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
95 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
96 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
97 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
98 */
99 if (val == 0 || val > max_ratio) {
100 DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
101 val = max_ratio;
102 }
103 if (val > DCCPF_ACK_RATIO_MAX)
104 val = DCCPF_ACK_RATIO_MAX;
105
106 if (val == dp->dccps_l_ack_ratio)
107 return;
108
109 ccid2_pr_debug("changing local ack ratio to %u\n", val);
110 dp->dccps_l_ack_ratio = val;
111 }
112
113 static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val)
114 {
115 ccid2_pr_debug("change SRTT to %ld\n", val);
116 hctx->srtt = val;
117 }
118
119 static void ccid2_start_rto_timer(struct sock *sk);
120
/*
 * RTO timer callback - fires when outstanding data has gone unacked for
 * a full retransmit timeout. Backs off the timer exponentially (capped
 * at 60 seconds), re-enters slow start (ssthresh = cwnd/2 but at least
 * 2, cwnd = 1, pipe = 0), and discards the entire transmit history,
 * since none of it counts as outstanding any longer.
 *
 * @data: the struct sock * this timer belongs to (cast to unsigned long
 *        when the timer was set up in ccid2_hc_tx_init()).
 */
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
	long s;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* socket is in use by process context - retry in 200ms */
		sk_reset_timer(sk, &hctx->rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	/* back-off timer */
	hctx->rto <<= 1;

	/* cap the backed-off timeout at one minute */
	s = hctx->rto / HZ;
	if (s > 60)
		hctx->rto = 60 * HZ;

	/* adjust pipe, cwnd etc */
	hctx->ssthresh = hctx->cwnd / 2;
	if (hctx->ssthresh < 2)
		hctx->ssthresh = 2;
	hctx->cwnd = 1;
	hctx->pipe = 0;

	/* clear state about stuff we sent */
	hctx->seqt = hctx->seqh;
	hctx->packets_acked = 0;

	/* clear ack ratio state. */
	hctx->rpseq = 0;
	hctx->rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);

	/* if we were blocked before, we may now send cwnd=1 packet */
	if (sender_was_blocked)
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
	ccid2_start_rto_timer(sk);
out:
	bh_unlock_sock(sk);
	sock_put(sk);	/* balances the hold taken when the timer was armed */
}
167
168 static void ccid2_start_rto_timer(struct sock *sk)
169 {
170 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
171
172 ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->rto);
173
174 BUG_ON(timer_pending(&hctx->rtotimer));
175 sk_reset_timer(sk, &hctx->rtotimer,
176 jiffies + hctx->rto);
177 }
178
/*
 * Post-transmit hook: account one more packet in flight and append an
 * entry for it (sequence number, send time) to the transmit-history
 * ring, growing the ring via ccid2_hc_tx_alloc_seq() when the head
 * would catch up with the tail. Also (re)arms the RTO timer if it is
 * not already pending.
 *
 * @len: payload length of the packet just sent (currently unused here).
 */
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	struct ccid2_seq *next;

	hctx->pipe++;	/* one more packet outstanding in the network */

	/* record the just-sent packet at the head of the history ring */
	hctx->seqh->ccid2s_seq   = dp->dccps_gss;
	hctx->seqh->ccid2s_acked = 0;
	hctx->seqh->ccid2s_sent  = jiffies;

	next = hctx->seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hctx->seqt) {
		if (ccid2_hc_tx_alloc_seq(hctx)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to bail out */
			return;
		}
		next = hctx->seqh->ccid2s_next;
		BUG_ON(next == hctx->seqt);
	}
	hctx->seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->cwnd, hctx->pipe);

	/*
	 * FIXME: The code below is broken and the variables have been removed
	 * from the socket struct. The `ackloss' variable was always set to 0,
	 * and with arsent there are several problems:
	 *  (i) it doesn't just count the number of Acks, but all sent packets;
	 *  (ii) it is expressed in # of packets, not # of windows, so the
	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
	 *  of data with no lost or marked Ack packets. If arsent were the # of
	 *  consecutive Acks received without loss, then Ack Ratio needs to be
	 *  decreased by 1 when
	 *	arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
	 *  where cwnd / R is the number of Acks received per window of data
	 *  (cf. RFC 4341, App. A). The problems are that
	 *  - arsent counts other packets as well;
	 *  - the comparison uses a formula different from RFC 4341;
	 *  - computing a cubic/quadratic equation each time is too complicated.
	 *  Hence a different algorithm is needed.
	 */
#if 0
	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
	hctx->arsent++;
	/* We had an ack loss in this window... */
	if (hctx->ackloss) {
		if (hctx->arsent >= hctx->cwnd) {
			hctx->arsent = 0;
			hctx->ackloss = 0;
		}
	} else {
		/* No acks lost up to now... */
		/* decrease ack ratio if enough packets were sent */
		if (dp->dccps_l_ack_ratio > 1) {
			/* XXX don't calculate denominator each time */
			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
				    dp->dccps_l_ack_ratio;

			denom = hctx->cwnd * hctx->cwnd / denom;

			if (hctx->arsent >= denom) {
				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
				hctx->arsent = 0;
			}
		} else {
			/* we can't increase ack ratio further [1] */
			hctx->arsent = 0; /* or maybe set it to cwnd*/
		}
	}
#endif

	/* setup RTO timer */
	if (!timer_pending(&hctx->rtotimer))
		ccid2_start_rto_timer(sk);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	/* debug build only: dump the current transmit history, tail->head */
	do {
		struct ccid2_seq *seqp = hctx->seqt;

		while (seqp != hctx->seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
#endif
}
273
274 static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
275 {
276 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
277
278 sk_stop_timer(sk, &hctx->rtotimer);
279 ccid2_pr_debug("deleted RTO timer\n");
280 }
281
/*
 * Process one newly-acknowledged packet:
 *  - grow cwnd: in slow start, +1 per two acked packets while *maxincr
 *    budget remains; in congestion avoidance, +1 per cwnd acked packets;
 *  - take an RTT sample from the packet's send timestamp (at most once
 *    per SRTT) and update SRTT/RTTVAR/RTO with shift-based arithmetic
 *    (srtt = 7/8 srtt + r/8, rttvar = 3/4 rttvar + |srtt - r|/4),
 *    clamping RTO to [1s, 60s].
 *
 * @seqp:    history entry of the packet being acknowledged
 * @maxincr: in/out budget limiting slow-start cwnd growth per Ack
 */
static inline void ccid2_new_ack(struct sock *sk,
				 struct ccid2_seq *seqp,
				 unsigned int *maxincr)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);

	if (hctx->cwnd < hctx->ssthresh) {
		/* slow start: one cwnd increment per 2 acked packets */
		if (*maxincr > 0 && ++hctx->packets_acked == 2) {
			hctx->cwnd += 1;
			*maxincr   -= 1;
			hctx->packets_acked = 0;
		}
	} else if (++hctx->packets_acked >= hctx->cwnd) {
		/* congestion avoidance: +1 per full window of Acks */
		hctx->cwnd += 1;
		hctx->packets_acked = 0;
	}

	/* update RTO */
	if (hctx->srtt == -1 ||
	    time_after(jiffies, hctx->lastrtt + hctx->srtt)) {
		/* raw RTT sample, in jiffies, from the send timestamp */
		unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
		int s;

		/* first measurement */
		if (hctx->srtt == -1) {
			ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
				       r, jiffies,
				       (unsigned long long)seqp->ccid2s_seq);
			ccid2_change_srtt(hctx, r);
			hctx->rttvar = r >> 1;
		} else {
			/* RTTVAR: rttvar = 3/4 rttvar + 1/4 |srtt - r| */
			long tmp = hctx->srtt - r;
			long srtt;

			if (tmp < 0)
				tmp *= -1;

			tmp >>= 2;
			hctx->rttvar *= 3;
			hctx->rttvar >>= 2;
			hctx->rttvar += tmp;

			/* SRTT: srtt = 7/8 srtt + 1/8 r */
			srtt = hctx->srtt;
			srtt *= 7;
			srtt >>= 3;
			tmp = r >> 3;
			srtt += tmp;
			ccid2_change_srtt(hctx, srtt);
		}
		/* RTO = SRTT + 4 * RTTVAR */
		s = hctx->rttvar << 2;
		/* clock granularity is 1 when based on jiffies */
		if (!s)
			s = 1;
		hctx->rto = hctx->srtt + s;

		/* must be at least a second */
		s = hctx->rto / HZ;
		/* DCCP doesn't require this [but I like it cuz my code sux] */
#if 1
		if (s < 1)
			hctx->rto = HZ;
#endif
		/* max 60 seconds */
		if (s > 60)
			hctx->rto = HZ * 60;

		/* remember when this sample was taken (rate-limits sampling) */
		hctx->lastrtt = jiffies;

		ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
			       hctx->srtt, hctx->rttvar,
			       hctx->rto, HZ, r);
	}
}
357
358 static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
359 {
360 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
361
362 if (time_before(seqp->ccid2s_sent, hctx->last_cong)) {
363 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
364 return;
365 }
366
367 hctx->last_cong = jiffies;
368
369 hctx->cwnd = hctx->cwnd / 2 ? : 1U;
370 hctx->ssthresh = max(hctx->cwnd, 2U);
371
372 /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
373 if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->cwnd)
374 ccid2_change_l_ack_ratio(sk, hctx->cwnd);
375 }
376
377 static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
378 u8 option, u8 *optval, u8 optlen)
379 {
380 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
381
382 switch (option) {
383 case DCCPO_ACK_VECTOR_0:
384 case DCCPO_ACK_VECTOR_1:
385 return dccp_ackvec_parsed_add(&hctx->av_chunks, optval, optlen,
386 option - DCCPO_ACK_VECTOR_0);
387 }
388 return 0;
389 }
390
/*
 * Main Ack processing for the TX half-connection. In order:
 *  1. reverse-path congestion heuristic: count out-of-order receive
 *     sequence numbers as "dupacks" and double the local Ack Ratio
 *     after NUMDUPACK of them;
 *  2. walk the parsed Ack Vector chunks against the transmit-history
 *     ring, marking entries acked (ccid2_new_ack) or congestion-marked
 *     (ccid2_congestion_event) and shrinking `pipe';
 *  3. NUMDUPACK-style loss detection: anything unacked below the
 *     NUMDUPACK-th acked entry is treated as lost;
 *  4. trim the acked tail of the history, rearm or stop the RTO timer,
 *     and kick the transmit tasklet if the cwnd has reopened.
 */
static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
	struct dccp_ackvec_parsed *avp;
	u64 ackno, seqno;
	struct ccid2_seq *seqp;
	int done = 0;
	unsigned int maxincr = 0;

	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
	 * -sorbo.
	 */
	/* need to bootstrap */
	if (hctx->rpdupack == -1) {
		hctx->rpdupack = 0;
		hctx->rpseq = seqno;
	} else {
		/* check if packet is consecutive */
		if (dccp_delta_seqno(hctx->rpseq, seqno) == 1)
			hctx->rpseq = seqno;
		/* it's a later packet */
		else if (after48(seqno, hctx->rpseq)) {
			hctx->rpdupack++;

			/* check if we got enough dupacks */
			if (hctx->rpdupack >= NUMDUPACK) {
				hctx->rpdupack = -1; /* XXX lame */
				hctx->rpseq = 0;

				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
			}
		}
	}

	/* check forward path congestion */
	if (dccp_packet_without_ack(skb))
		return;

	/* still didn't send out new data packets */
	if (hctx->seqh == hctx->seqt)
		goto done;

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hctx->high_ack))
		hctx->high_ack = ackno;

	/* advance seqp from the tail to the first entry covered by ackno */
	seqp = hctx->seqt;
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hctx->seqh) {
			seqp = hctx->seqh->ccid2s_prev;
			break;
		}
	}

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up avoids that cwnd is not
	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
	 */
	if (hctx->cwnd < hctx->ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

	/* go through all ack vectors */
	list_for_each_entry(avp, &hctx->av_chunks, node) {
		/* go through this ack vector */
		for (; avp->len--; avp->vec++) {
			/* lowest seqno (inclusive bound) of this run length */
			u64 ackno_end_rl = SUB48(ackno,
						 dccp_ackvec_runlen(avp->vec));

			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
				       (unsigned long long)ackno,
				       dccp_ackvec_state(avp->vec) >> 6,
				       dccp_ackvec_runlen(avp->vec));
			/* if the seqno we are analyzing is larger than the
			 * current ackno, then move towards the tail of our
			 * seqnos.
			 */
			while (after48(seqp->ccid2s_seq, ackno)) {
				if (seqp == hctx->seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/* check all seqnos in the range of the vector
			 * run length
			 */
			while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
				const u8 state = dccp_ackvec_state(avp->vec);

				/* new packet received or marked */
				if (state != DCCPAV_NOT_RECEIVED &&
				    !seqp->ccid2s_acked) {
					if (state == DCCPAV_ECN_MARKED)
						ccid2_congestion_event(sk,
								       seqp);
					else
						ccid2_new_ack(sk, seqp,
							      &maxincr);

					seqp->ccid2s_acked = 1;
					ccid2_pr_debug("Got ack for %llu\n",
						       (unsigned long long)seqp->ccid2s_seq);
					hctx->pipe--;
				}
				if (seqp == hctx->seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/* continue just below the run we finished */
			ackno = SUB48(ackno_end_rl, 1);
		}
		if (done)
			break;
	}

	/* The state about what is acked should be correct now
	 * Check for NUMDUPACK
	 */
	seqp = hctx->seqt;
	while (before48(seqp->ccid2s_seq, hctx->high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hctx->seqh) {
			seqp = hctx->seqh->ccid2s_prev;
			break;
		}
	}
	done = 0;
	/* count acked entries downwards from high_ack (done = acked seen) */
	while (1) {
		if (seqp->ccid2s_acked) {
			done++;
			if (done == NUMDUPACK)
				break;
		}
		if (seqp == hctx->seqt)
			break;
		seqp = seqp->ccid2s_prev;
	}

	/* If there are at least 3 acknowledgements, anything unacknowledged
	 * below the last sequence number is considered lost
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		while (1) {
			if (!seqp->ccid2s_acked) {
				ccid2_pr_debug("Packet lost: %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);
				/* XXX need to traverse from tail -> head in
				 * order to detect multiple congestion events in
				 * one ack vector.
				 */
				ccid2_congestion_event(sk, seqp);
				hctx->pipe--;
			}
			if (seqp == hctx->seqt)
				break;
			seqp = seqp->ccid2s_prev;
		}

		hctx->seqt = last_acked;
	}

	/* trim acked packets in tail */
	while (hctx->seqt != hctx->seqh) {
		if (!hctx->seqt->ccid2s_acked)
			break;

		hctx->seqt = hctx->seqt->ccid2s_next;
	}

	/* restart RTO timer if not all outstanding data has been acked */
	if (hctx->pipe == 0)
		sk_stop_timer(sk, &hctx->rtotimer);
	else
		sk_reset_timer(sk, &hctx->rtotimer,
			       jiffies + hctx->rto);
done:
	/* check if incoming Acks allow pending packets to be sent */
	if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
	dccp_ackvec_parsed_cleanup(&hctx->av_chunks);
}
590
591 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
592 {
593 struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
594 struct dccp_sock *dp = dccp_sk(sk);
595 u32 max_ratio;
596
597 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
598 hctx->ssthresh = ~0U;
599
600 /*
601 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
602 * packets for new connections, following the rules from [RFC3390]".
603 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
604 */
605 hctx->cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
606
607 /* Make sure that Ack Ratio is enabled and within bounds. */
608 max_ratio = DIV_ROUND_UP(hctx->cwnd, 2);
609 if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
610 dp->dccps_l_ack_ratio = max_ratio;
611
612 /* XXX init ~ to window size... */
613 if (ccid2_hc_tx_alloc_seq(hctx))
614 return -ENOMEM;
615
616 hctx->rto = 3 * HZ;
617 ccid2_change_srtt(hctx, -1);
618 hctx->rttvar = -1;
619 hctx->rpdupack = -1;
620 hctx->last_cong = jiffies;
621 setup_timer(&hctx->rtotimer, ccid2_hc_tx_rto_expire, (unsigned long)sk);
622 INIT_LIST_HEAD(&hctx->av_chunks);
623 return 0;
624 }
625
626 static void ccid2_hc_tx_exit(struct sock *sk)
627 {
628 struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
629 int i;
630
631 ccid2_hc_tx_kill_rto_timer(sk);
632
633 for (i = 0; i < hctx->seqbufc; i++)
634 kfree(hctx->seqbuf[i]);
635 hctx->seqbufc = 0;
636 }
637
638 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
639 {
640 const struct dccp_sock *dp = dccp_sk(sk);
641 struct ccid2_hc_rx_sock *hcrx = ccid2_hc_rx_sk(sk);
642
643 switch (DCCP_SKB_CB(skb)->dccpd_type) {
644 case DCCP_PKT_DATA:
645 case DCCP_PKT_DATAACK:
646 hcrx->data++;
647 if (hcrx->data >= dp->dccps_r_ack_ratio) {
648 dccp_send_ack(sk);
649 hcrx->data = 0;
650 }
651 break;
652 }
653 }
654
/* Operations table registering CCID-2 ("TCP-like") with the DCCP core. */
static struct ccid_operations ccid2 = {
	.ccid_id		= DCCPC_CCID2,
	.ccid_name		= "TCP-like",
	.ccid_owner		= THIS_MODULE,
	.ccid_hc_tx_obj_size	= sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	= ccid2_hc_tx_init,
	.ccid_hc_tx_exit	= ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	= ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	= ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
	.ccid_hc_tx_packet_recv	= ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	= sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	= ccid2_hc_rx_packet_recv,
};
669
670 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
671 module_param(ccid2_debug, bool, 0644);
672 MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
673 #endif
674
/* Module entry point: register CCID-2 with the DCCP CCID framework. */
static __init int ccid2_module_init(void)
{
	return ccid_register(&ccid2);
}
module_init(ccid2_module_init);
680
/* Module exit point: unregister CCID-2 from the DCCP CCID framework. */
static __exit void ccid2_module_exit(void)
{
	ccid_unregister(&ccid2);
}
module_exit(ccid2_module_exit);
686
687 MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>");
688 MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID");
689 MODULE_LICENSE("GPL");
690 MODULE_ALIAS("net-dccp-ccid-2");
/* end of net/dccp/ccids/ccid2.c */