[DCCP]: Only deliver to the CCID rx side in charge
/*
 * net/dccp/output.c
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
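		/*
		 * With dccph_x == 1 (48-bit sequence numbers) this comes to
		 * sizeof(struct dccp_hdr) (12 bytes) plus a 4-byte
		 * struct dccp_hdr_ext, i.e. 16 bytes of generic header,
		 * before the fixed part of the type-specific header.
		 */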
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			/* fall through */
		default:
			/*
			 * Only data packets should come through with skb->sk
			 * set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->sport;
		dh->dccph_dport	= inet->dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
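		/* dccph_doff is the data offset in 32-bit words, so
		 * dccp_insert_options() must have padded the option area
		 * to a multiple of 4 bytes. */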
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
		err = icsk->icsk_af_ops->queue_xmit(skb, sk, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len -
		       sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext));

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now let's
	 * put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO + ELAPSED
	 * TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE + padding to
	 * make it a multiple of 4
	 */

	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
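	/* With integer division the estimate above works out to
	 * ((42 / 4) * 4) = 40 bytes of option space. For example, an IPv4
	 * path MTU of 1500 with no extension headers leaves
	 * 1500 - 20 - 16 - 40 = 1424 bytes of payload. */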

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for CCID to tell us we can send a packet
 * @sk:  socket to wait for
 * @skb: current skb to pass on for waiting
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	unsigned long delay;
	int rc;

	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		sk->sk_write_pending++;
		release_sock(sk);
		schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_interrupted:
	rc = -EINTR;
	goto out;
}

static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}

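/*
 * dccp_write_xmit - drain the write queue via the CCID tx side
 *
 * ccid_hc_tx_send_packet() returning 0 means the packet may go out now;
 * a positive return value is a delay in milliseconds. With @block set the
 * caller is willing to sleep in dccp_wait_for_ccid(); otherwise
 * dccps_xmit_timer is armed so the queue is retried from timer context.
 */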
void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
					       msecs_to_jiffies(err) + jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb);
			if (err)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				/* See 8.1.5. Handshake Completion */
				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else
			kfree_skb(skb);
	}
}

int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

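	/* A cloned skb still shares its data with the queued original, so
	 * take a private copy before dccp_transmit_skb() rewrites the
	 * header; an unshared skb only needs a cheap clone. */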
	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				       const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb->dst = dst_clone(dst);

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;

	if (dccp_insert_options(sk, skb)) {
		kfree_skb(skb);
		return NULL;
	}

	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_sk(sk)->sport;
	dh->dccph_dport	= inet_sk(sk)->dport;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;
	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_sk_rebuild_header(sk);

	if (err == 0) {
		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
						      code);
		if (skb != NULL) {
			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, sk, 0);
			return net_xmit_eval(err);
		}
	}

	return err;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/*
	 * SWL and AWL are initially adjusted so that they are not less than
	 * the initial Sequence Numbers received and sent, respectively:
	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
	 *	AWL := max(GSS - W' + 1, ISS).
	 * These adjustments MUST be applied only at the beginning of the
	 * connection.
	 */
	dccp_update_gss(sk, dp->dccps_iss);
	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));

	/* S.GAR - greatest valid acknowledgement number received on a non-Sync;
	 *	   initialized to S.ISS (sec. 8.5) */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
	init_timer(&dp->dccps_xmit_timer);
	dp->dccps_xmit_timer.data = (unsigned long)sk;
	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

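		/* If allocation fails, arm the delayed-ACK timer so the
		 * ACK is retried later instead of being dropped. */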
		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq = seq;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

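	/* An active close first drains any queued data, keeps the
	 * CLOSE/CLOSEREQ at sk_send_head (via dccp_skb_entail()), and puts
	 * a clone of it on the wire. */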
	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/* FIXME do we need a retransmit timer here? */
	} else
		dccp_transmit_skb(sk, skb);
}