net: introduce helper macro for_each_cmsghdr
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	/*
	 * DCCP doesn't use sk_write_queue, just sk_send_head
	 * for retransmissions
	 */
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 * Wait for a DCCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else { /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

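/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * SIOCINQ as handled above reports the length of the packet at the head
 * of the receive queue, i.e. how much the next read would return. The
 * variable names are assumptions.
 *
 *	int avail;
 *
 *	if (ioctl(fd, SIOCINQ, &avail) == 0)
 *		printf("next read returns %d bytes\n", avail);
 */
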
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

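/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * the service code is passed as a 32-bit value in network byte order.
 * The value 42 and the lack of error handling are assumptions.
 *
 *	__be32 service = htonl(42);
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 */
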
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

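/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * requesting a checksum coverage of 10 on the send side; the helper
 * above turns this into a preference list for feature negotiation.
 * The value 10 is an arbitrary assumption.
 *
 *	int cscov = 10;
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV,
 *		   &cscov, sizeof(cscov));
 */
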
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_user(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

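/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * querying the current maximum packet size; dccp_sendmsg() rejects
 * payloads larger than this with -EMSGSIZE.
 *
 *	int mps;
 *	socklen_t len = sizeof(mps);
 *
 *	getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_GET_CUR_MPS, &mps, &len);
 */
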
#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

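/*
 * dccp_msghdr_parse() above uses the for_each_cmsghdr() helper named in
 * the commit subject. A sketch of the macro (per include/linux/socket.h),
 * which replaces open-coded CMSG_FIRSTHDR()/CMSG_NXTHDR() loops:
 *
 *	#define for_each_cmsghdr(cmsg, msg) \
 *		for (cmsg = CMSG_FIRSTHDR(msg); \
 *		     cmsg; \
 *		     cmsg = CMSG_NXTHDR(msg, cmsg))
 */
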
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits further packets to be released into
	 * the network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

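/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * attaching a qpolicy priority to one packet via the SOL_DCCP control
 * message consumed by dccp_msghdr_parse(). `fd', `data', `data_len' and
 * `prio' are assumptions.
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct iovec iov = { .iov_base = data, .iov_len = data_len };
 *	struct msghdr msg = {
 *		.msg_iov	= &iov,
 *		.msg_iovlen	= 1,
 *		.msg_control	= cbuf,
 *		.msg_controllen	= sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_DCCP;
 *	cmsg->cmsg_type	 = DCCP_SCM_PRIORITY;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
 *	memcpy(CMSG_DATA(cmsg), &prio, sizeof(__u32));
 *
 *	sendmsg(fd, &msg, 0);
 */
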
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

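/*
 * Usage sketch (userspace, illustrative only -- not part of this file):
 * a minimal path into the listen code above. The address setup, service
 * code value and backlog are assumptions; a DCCP server typically sets
 * its service code (DCCP_SOCKOPT_SERVICE) before calling listen().
 *
 *	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
 *	__be32 service = htonl(42);
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
 *		   &service, sizeof(service));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 5);
 */
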
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}

void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");