net: Fix use after free by removing length arg from sk_data_ready callbacks.
net/sctp/socket.c
1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001-2003 Intel Corp.
6 * Copyright (c) 2001-2002 Nokia, Inc.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This file is part of the SCTP kernel implementation
10 *
11 * These functions interface with the sockets layer to implement the
12 * SCTP Extensions for the Sockets API.
13 *
14 * Note that the descriptions from the specification are USER level
15 * functions--this file is the functions which populate the struct proto
16 * for SCTP which is the BOTTOM of the sockets interface.
17 *
18 * This SCTP implementation is free software;
19 * you can redistribute it and/or modify it under the terms of
20 * the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This SCTP implementation is distributed in the hope that it
25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
26 * ************************
27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
28 * See the GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with GNU CC; see the file COPYING. If not, see
32 * <http://www.gnu.org/licenses/>.
33 *
34 * Please send any bug reports or fixes you make to the
35 * email address(es):
36 * lksctp developers <linux-sctp@vger.kernel.org>
37 *
38 * Written or modified by:
39 * La Monte H.P. Yarroll <piggy@acm.org>
40 * Narasimha Budihal <narsi@refcode.org>
41 * Karl Knutson <karl@athena.chicago.il.us>
42 * Jon Grimm <jgrimm@us.ibm.com>
43 * Xingang Guo <xingang.guo@intel.com>
44 * Daisy Chang <daisyc@us.ibm.com>
45 * Sridhar Samudrala <samudrala@us.ibm.com>
46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
47 * Ardelle Fan <ardelle.fan@intel.com>
48 * Ryan Layer <rmlayer@us.ibm.com>
49 * Anup Pemmaiah <pemmaiah@cc.usu.edu>
50 * Kevin Gao <kevin.gao@intel.com>
51 */
52
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
55 #include <linux/types.h>
56 #include <linux/kernel.h>
57 #include <linux/wait.h>
58 #include <linux/time.h>
59 #include <linux/ip.h>
60 #include <linux/capability.h>
61 #include <linux/fcntl.h>
62 #include <linux/poll.h>
63 #include <linux/init.h>
64 #include <linux/crypto.h>
65 #include <linux/slab.h>
66 #include <linux/file.h>
67 #include <linux/compat.h>
68
69 #include <net/ip.h>
70 #include <net/icmp.h>
71 #include <net/route.h>
72 #include <net/ipv6.h>
73 #include <net/inet_common.h>
74
75 #include <linux/socket.h> /* for sa_family_t */
76 #include <linux/export.h>
77 #include <net/sock.h>
78 #include <net/sctp/sctp.h>
79 #include <net/sctp/sm.h>
80
81 /* Forward declarations for internal helper functions. */
82 static int sctp_writeable(struct sock *sk);
83 static void sctp_wfree(struct sk_buff *skb);
84 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
85 size_t msg_len);
86 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
87 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
88 static int sctp_wait_for_accept(struct sock *sk, long timeo);
89 static void sctp_wait_for_close(struct sock *sk, long timeo);
90 static void sctp_destruct_sock(struct sock *sk);
91 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
92 union sctp_addr *addr, int len);
93 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
94 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
95 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
96 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
97 static int sctp_send_asconf(struct sctp_association *asoc,
98 struct sctp_chunk *chunk);
99 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
100 static int sctp_autobind(struct sock *sk);
101 static void sctp_sock_migrate(struct sock *, struct sock *,
102 struct sctp_association *, sctp_socket_type_t);
103
104 extern struct kmem_cache *sctp_bucket_cachep;
105 extern long sysctl_sctp_mem[3];
106 extern int sysctl_sctp_rmem[3];
107 extern int sysctl_sctp_wmem[3];
108
109 static int sctp_memory_pressure;
110 static atomic_long_t sctp_memory_allocated;
111 struct percpu_counter sctp_sockets_allocated;
112
113 static void sctp_enter_memory_pressure(struct sock *sk)
114 {
115 sctp_memory_pressure = 1;
116 }
117
118
119 /* Get the sndbuf space available at the time on the association. */
120 static inline int sctp_wspace(struct sctp_association *asoc)
121 {
122 int amt;
123
124 if (asoc->ep->sndbuf_policy)
125 amt = asoc->sndbuf_used;
126 else
127 amt = sk_wmem_alloc_get(asoc->base.sk);
128
129 if (amt >= asoc->base.sk->sk_sndbuf) {
130 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
131 amt = 0;
132 else {
133 amt = sk_stream_wspace(asoc->base.sk);
134 if (amt < 0)
135 amt = 0;
136 }
137 } else {
138 amt = asoc->base.sk->sk_sndbuf - amt;
139 }
140 return amt;
141 }
142
143 /* Increment the used sndbuf space count of the corresponding association by
144 * the size of the outgoing data chunk.
145 * Also, set the skb destructor for sndbuf accounting later.
146 *
147 * Since it is always 1-1 between chunk and skb, and also a new skb is always
148 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
149 * destructor in the data chunk skb for the purpose of the sndbuf space
150 * tracking.
151 */
152 static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
153 {
154 struct sctp_association *asoc = chunk->asoc;
155 struct sock *sk = asoc->base.sk;
156
157 /* The sndbuf space is tracked per association. */
158 sctp_association_hold(asoc);
159
160 skb_set_owner_w(chunk->skb, sk);
161
162 chunk->skb->destructor = sctp_wfree;
163 /* Save the chunk pointer in skb for sctp_wfree to use later. */
164 *((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
165
166 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
167 sizeof(struct sk_buff) +
168 sizeof(struct sctp_chunk);
169
170 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
171 sk->sk_wmem_queued += chunk->skb->truesize;
172 sk_mem_charge(sk, chunk->skb->truesize);
173 }
174
175 /* Verify that this is a valid address. */
176 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
177 int len)
178 {
179 struct sctp_af *af;
180
181 /* Verify basic sockaddr. */
182 af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
183 if (!af)
184 return -EINVAL;
185
186 /* Is this a valid SCTP address? */
187 if (!af->addr_valid(addr, sctp_sk(sk), NULL))
188 return -EINVAL;
189
190 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
191 return -EINVAL;
192
193 return 0;
194 }
195
196 /* Look up the association by its id. If this is not a UDP-style
197 * socket, the ID field is always ignored.
198 */
199 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
200 {
201 struct sctp_association *asoc = NULL;
202
203 /* If this is not a UDP-style socket, assoc id should be ignored. */
204 if (!sctp_style(sk, UDP)) {
205 /* Return NULL if the socket state is not ESTABLISHED. It
206 * could be a TCP-style listening socket or a socket which
207 * hasn't yet called connect() to establish an association.
208 */
209 if (!sctp_sstate(sk, ESTABLISHED))
210 return NULL;
211
212 /* Get the first and the only association from the list. */
213 if (!list_empty(&sctp_sk(sk)->ep->asocs))
214 asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
215 struct sctp_association, asocs);
216 return asoc;
217 }
218
219 /* Otherwise this is a UDP-style socket. */
220 if (!id || (id == (sctp_assoc_t)-1))
221 return NULL;
222
223 spin_lock_bh(&sctp_assocs_id_lock);
224 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
225 spin_unlock_bh(&sctp_assocs_id_lock);
226
227 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
228 return NULL;
229
230 return asoc;
231 }
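
/* For reference, a minimal user-space sketch of how an association id
 * typically reaches the kernel and is then resolved by sctp_id2assoc():
 * the id is carried in a socket option argument, here SCTP_STATUS on a
 * one-to-many socket (variable names are illustrative only):
 *
 *	struct sctp_status status = { .sstat_assoc_id = assoc_id };
 *	socklen_t len = sizeof(status);
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *		printf("state %d, unacked %u\n",
 *		       status.sstat_state, status.sstat_unackdata);
 */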
232
233 /* Look up the transport from an address and an assoc id. If both address and
234 * id are specified, the associations matching the address and the id should be
235 * the same.
236 */
237 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
238 struct sockaddr_storage *addr,
239 sctp_assoc_t id)
240 {
241 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
242 struct sctp_transport *transport;
243 union sctp_addr *laddr = (union sctp_addr *)addr;
244
245 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
246 laddr,
247 &transport);
248
249 if (!addr_asoc)
250 return NULL;
251
252 id_asoc = sctp_id2assoc(sk, id);
253 if (id_asoc && (id_asoc != addr_asoc))
254 return NULL;
255
256 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
257 (union sctp_addr *)addr);
258
259 return transport;
260 }
261
262 /* API 3.1.2 bind() - UDP Style Syntax
263 * The syntax of bind() is,
264 *
265 * ret = bind(int sd, struct sockaddr *addr, int addrlen);
266 *
267 * sd - the socket descriptor returned by socket().
268 * addr - the address structure (struct sockaddr_in or struct
269 * sockaddr_in6 [RFC 2553]),
270 * addr_len - the size of the address structure.
271 */
272 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
273 {
274 int retval = 0;
275
276 lock_sock(sk);
277
278 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
279 addr, addr_len);
280
281 /* Disallow binding twice. */
282 if (!sctp_sk(sk)->ep->base.bind_addr.port)
283 retval = sctp_do_bind(sk, (union sctp_addr *)addr,
284 addr_len);
285 else
286 retval = -EINVAL;
287
288 release_sock(sk);
289
290 return retval;
291 }
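
/* A minimal user-space sketch of the UDP-style bind() described above,
 * assuming a one-to-many SCTP socket; the port number and variable names
 * are illustrative only:
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *		perror("bind");
 */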
292
293 static long sctp_get_port_local(struct sock *, union sctp_addr *);
294
295 /* Verify this is a valid sockaddr. */
296 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
297 union sctp_addr *addr, int len)
298 {
299 struct sctp_af *af;
300
301 /* Check minimum size. */
302 if (len < sizeof (struct sockaddr))
303 return NULL;
304
305 /* V4 mapped addresses are really of AF_INET family */
306 if (addr->sa.sa_family == AF_INET6 &&
307 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
308 if (!opt->pf->af_supported(AF_INET, opt))
309 return NULL;
310 } else {
311 /* Does this PF support this AF? */
312 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
313 return NULL;
314 }
315
316 /* If we get this far, af is valid. */
317 af = sctp_get_af_specific(addr->sa.sa_family);
318
319 if (len < af->sockaddr_len)
320 return NULL;
321
322 return af;
323 }
324
325 /* Bind a local address either to an endpoint or to an association. */
326 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
327 {
328 struct net *net = sock_net(sk);
329 struct sctp_sock *sp = sctp_sk(sk);
330 struct sctp_endpoint *ep = sp->ep;
331 struct sctp_bind_addr *bp = &ep->base.bind_addr;
332 struct sctp_af *af;
333 unsigned short snum;
334 int ret = 0;
335
336 /* Common sockaddr verification. */
337 af = sctp_sockaddr_af(sp, addr, len);
338 if (!af) {
339 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
340 __func__, sk, addr, len);
341 return -EINVAL;
342 }
343
344 snum = ntohs(addr->v4.sin_port);
345
346 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
347 __func__, sk, &addr->sa, bp->port, snum, len);
348
349 /* PF specific bind() address verification. */
350 if (!sp->pf->bind_verify(sp, addr))
351 return -EADDRNOTAVAIL;
352
353 /* We must either be unbound, or bind to the same port.
354 * It's OK to allow 0 ports if we are already bound.
355 * We'll just inherit an already bound port in this case.
356 */
357 if (bp->port) {
358 if (!snum)
359 snum = bp->port;
360 else if (snum != bp->port) {
361 pr_debug("%s: new port %d doesn't match existing port "
362 "%d\n", __func__, snum, bp->port);
363 return -EINVAL;
364 }
365 }
366
367 if (snum && snum < PROT_SOCK &&
368 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
369 return -EACCES;
370
371 /* See if the address matches any of the addresses we may have
372 * already bound before checking against other endpoints.
373 */
374 if (sctp_bind_addr_match(bp, addr, sp))
375 return -EINVAL;
376
377 /* Make sure we are allowed to bind here.
378 * The function sctp_get_port_local() does duplicate address
379 * detection.
380 */
381 addr->v4.sin_port = htons(snum);
382 if ((ret = sctp_get_port_local(sk, addr))) {
383 return -EADDRINUSE;
384 }
385
386 /* Refresh ephemeral port. */
387 if (!bp->port)
388 bp->port = inet_sk(sk)->inet_num;
389
390 /* Add the address to the bind address list.
391 * Use GFP_ATOMIC since BHs will be disabled.
392 */
393 ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);
394
395 /* Copy back into socket for getsockname() use. */
396 if (!ret) {
397 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
398 af->to_sk_saddr(addr, sk);
399 }
400
401 return ret;
402 }
403
404 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
405 *
406 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
407 * at any one time. If a sender, after sending an ASCONF chunk, decides
408 * it needs to transfer another ASCONF Chunk, it MUST wait until the
409 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
410 * subsequent ASCONF. Note this restriction binds each side, so at any
411 * time two ASCONF may be in-transit on any given association (one sent
412 * from each endpoint).
413 */
414 static int sctp_send_asconf(struct sctp_association *asoc,
415 struct sctp_chunk *chunk)
416 {
417 struct net *net = sock_net(asoc->base.sk);
418 int retval = 0;
419
420 /* If there is an outstanding ASCONF chunk, queue it for later
421 * transmission.
422 */
423 if (asoc->addip_last_asconf) {
424 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
425 goto out;
426 }
427
428 /* Hold the chunk until an ASCONF_ACK is received. */
429 sctp_chunk_hold(chunk);
430 retval = sctp_primitive_ASCONF(net, asoc, chunk);
431 if (retval)
432 sctp_chunk_free(chunk);
433 else
434 asoc->addip_last_asconf = chunk;
435
436 out:
437 return retval;
438 }
439
440 /* Add a list of addresses as bind addresses to local endpoint or
441 * association.
442 *
443 * Basically run through each address specified in the addrs/addrcnt
444 * array/length pair, determine if it is IPv6 or IPv4 and call
445 * sctp_do_bind() on it.
446 *
447 * If any of them fails, then the operation will be reversed and the
448 * ones that were added will be removed.
449 *
450 * Only sctp_setsockopt_bindx() is supposed to call this function.
451 */
452 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
453 {
454 int cnt;
455 int retval = 0;
456 void *addr_buf;
457 struct sockaddr *sa_addr;
458 struct sctp_af *af;
459
460 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
461 addrs, addrcnt);
462
463 addr_buf = addrs;
464 for (cnt = 0; cnt < addrcnt; cnt++) {
465 /* The list may contain either IPv4 or IPv6 addresses;
466 * determine the address length for walking through the list.
467 */
468 sa_addr = addr_buf;
469 af = sctp_get_af_specific(sa_addr->sa_family);
470 if (!af) {
471 retval = -EINVAL;
472 goto err_bindx_add;
473 }
474
475 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
476 af->sockaddr_len);
477
478 addr_buf += af->sockaddr_len;
479
480 err_bindx_add:
481 if (retval < 0) {
482 /* Failed. Cleanup the ones that have been added */
483 if (cnt > 0)
484 sctp_bindx_rem(sk, addrs, cnt);
485 return retval;
486 }
487 }
488
489 return retval;
490 }
491
492 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the
493 * associations that are part of the endpoint indicating that a list of local
494 * addresses are added to the endpoint.
495 *
496 * If any of the addresses is already in the bind address list of the
497 * association, we do not send the chunk for that association. But it will not
498 * affect other associations.
499 *
500 * Only sctp_setsockopt_bindx() is supposed to call this function.
501 */
502 static int sctp_send_asconf_add_ip(struct sock *sk,
503 struct sockaddr *addrs,
504 int addrcnt)
505 {
506 struct net *net = sock_net(sk);
507 struct sctp_sock *sp;
508 struct sctp_endpoint *ep;
509 struct sctp_association *asoc;
510 struct sctp_bind_addr *bp;
511 struct sctp_chunk *chunk;
512 struct sctp_sockaddr_entry *laddr;
513 union sctp_addr *addr;
514 union sctp_addr saveaddr;
515 void *addr_buf;
516 struct sctp_af *af;
517 struct list_head *p;
518 int i;
519 int retval = 0;
520
521 if (!net->sctp.addip_enable)
522 return retval;
523
524 sp = sctp_sk(sk);
525 ep = sp->ep;
526
527 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
528 __func__, sk, addrs, addrcnt);
529
530 list_for_each_entry(asoc, &ep->asocs, asocs) {
531 if (!asoc->peer.asconf_capable)
532 continue;
533
534 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
535 continue;
536
537 if (!sctp_state(asoc, ESTABLISHED))
538 continue;
539
540 /* Check if any address in the packed array of addresses is
541 * in the bind address list of the association. If so,
542 * do not send the asconf chunk to its peer, but continue with
543 * other associations.
544 */
545 addr_buf = addrs;
546 for (i = 0; i < addrcnt; i++) {
547 addr = addr_buf;
548 af = sctp_get_af_specific(addr->v4.sin_family);
549 if (!af) {
550 retval = -EINVAL;
551 goto out;
552 }
553
554 if (sctp_assoc_lookup_laddr(asoc, addr))
555 break;
556
557 addr_buf += af->sockaddr_len;
558 }
559 if (i < addrcnt)
560 continue;
561
562 /* Use the first valid address in bind addr list of
563 * association as Address Parameter of ASCONF CHUNK.
564 */
565 bp = &asoc->base.bind_addr;
566 p = bp->address_list.next;
567 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
568 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
569 addrcnt, SCTP_PARAM_ADD_IP);
570 if (!chunk) {
571 retval = -ENOMEM;
572 goto out;
573 }
574
575 /* Add the new addresses to the bind address list with
576 * use_as_src set to 0.
577 */
578 addr_buf = addrs;
579 for (i = 0; i < addrcnt; i++) {
580 addr = addr_buf;
581 af = sctp_get_af_specific(addr->v4.sin_family);
582 memcpy(&saveaddr, addr, af->sockaddr_len);
583 retval = sctp_add_bind_addr(bp, &saveaddr,
584 SCTP_ADDR_NEW, GFP_ATOMIC);
585 addr_buf += af->sockaddr_len;
586 }
587 if (asoc->src_out_of_asoc_ok) {
588 struct sctp_transport *trans;
589
590 list_for_each_entry(trans,
591 &asoc->peer.transport_addr_list, transports) {
592 /* Clear the source and route cache */
593 dst_release(trans->dst);
594 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
595 2*asoc->pathmtu, 4380));
596 trans->ssthresh = asoc->peer.i.a_rwnd;
597 trans->rto = asoc->rto_initial;
598 sctp_max_rto(asoc, trans);
599 trans->rtt = trans->srtt = trans->rttvar = 0;
600 sctp_transport_route(trans, NULL,
601 sctp_sk(asoc->base.sk));
602 }
603 }
604 retval = sctp_send_asconf(asoc, chunk);
605 }
606
607 out:
608 return retval;
609 }
610
611 /* Remove a list of addresses from the bind address list. Do not remove the
612 * last address.
613 *
614 * Basically run through each address specified in the addrs/addrcnt
615 * array/length pair, determine if it is IPv6 or IPv4 and call
616 * sctp_del_bind() on it.
617 *
618 * If any of them fails, then the operation will be reversed and the
619 * ones that were removed will be added back.
620 *
621 * At least one address has to be left; if only one address is
622 * available, the operation will return -EBUSY.
623 *
624 * Only sctp_setsockopt_bindx() is supposed to call this function.
625 */
626 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
627 {
628 struct sctp_sock *sp = sctp_sk(sk);
629 struct sctp_endpoint *ep = sp->ep;
630 int cnt;
631 struct sctp_bind_addr *bp = &ep->base.bind_addr;
632 int retval = 0;
633 void *addr_buf;
634 union sctp_addr *sa_addr;
635 struct sctp_af *af;
636
637 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
638 __func__, sk, addrs, addrcnt);
639
640 addr_buf = addrs;
641 for (cnt = 0; cnt < addrcnt; cnt++) {
642 /* If the bind address list is empty or if there is only one
643 * bind address, there is nothing more to be removed (we need
644 * at least one address here).
645 */
646 if (list_empty(&bp->address_list) ||
647 (sctp_list_single_entry(&bp->address_list))) {
648 retval = -EBUSY;
649 goto err_bindx_rem;
650 }
651
652 sa_addr = addr_buf;
653 af = sctp_get_af_specific(sa_addr->sa.sa_family);
654 if (!af) {
655 retval = -EINVAL;
656 goto err_bindx_rem;
657 }
658
659 if (!af->addr_valid(sa_addr, sp, NULL)) {
660 retval = -EADDRNOTAVAIL;
661 goto err_bindx_rem;
662 }
663
664 if (sa_addr->v4.sin_port &&
665 sa_addr->v4.sin_port != htons(bp->port)) {
666 retval = -EINVAL;
667 goto err_bindx_rem;
668 }
669
670 if (!sa_addr->v4.sin_port)
671 sa_addr->v4.sin_port = htons(bp->port);
672
673 /* FIXME - There is probably a need to check if sk->sk_saddr and
674 * sk->sk_rcv_addr are currently set to one of the addresses to
675 * be removed. This is something which needs to be looked into
676 * when we are fixing the outstanding issues with multi-homing
677 * socket routing and failover schemes. Refer to comments in
678 * sctp_do_bind(). -daisy
679 */
680 retval = sctp_del_bind_addr(bp, sa_addr);
681
682 addr_buf += af->sockaddr_len;
683 err_bindx_rem:
684 if (retval < 0) {
685 /* Failed. Add the ones that have been removed back. */
686 if (cnt > 0)
687 sctp_bindx_add(sk, addrs, cnt);
688 return retval;
689 }
690 }
691
692 return retval;
693 }
694
695 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of
696 * the associations that are part of the endpoint indicating that a list of
697 * local addresses are removed from the endpoint.
698 *
699 * If any of the addresses is already in the bind address list of the
700 * association, we do not send the chunk for that association. But it will not
701 * affect other associations.
702 *
703 * Only sctp_setsockopt_bindx() is supposed to call this function.
704 */
705 static int sctp_send_asconf_del_ip(struct sock *sk,
706 struct sockaddr *addrs,
707 int addrcnt)
708 {
709 struct net *net = sock_net(sk);
710 struct sctp_sock *sp;
711 struct sctp_endpoint *ep;
712 struct sctp_association *asoc;
713 struct sctp_transport *transport;
714 struct sctp_bind_addr *bp;
715 struct sctp_chunk *chunk;
716 union sctp_addr *laddr;
717 void *addr_buf;
718 struct sctp_af *af;
719 struct sctp_sockaddr_entry *saddr;
720 int i;
721 int retval = 0;
722 int stored = 0;
723
724 chunk = NULL;
725 if (!net->sctp.addip_enable)
726 return retval;
727
728 sp = sctp_sk(sk);
729 ep = sp->ep;
730
731 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
732 __func__, sk, addrs, addrcnt);
733
734 list_for_each_entry(asoc, &ep->asocs, asocs) {
735
736 if (!asoc->peer.asconf_capable)
737 continue;
738
739 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
740 continue;
741
742 if (!sctp_state(asoc, ESTABLISHED))
743 continue;
744
745 /* Check if any address in the packed array of addresses is
746 * not present in the bind address list of the association.
747 * If so, do not send the asconf chunk to its peer, but
748 * continue with other associations.
749 */
750 addr_buf = addrs;
751 for (i = 0; i < addrcnt; i++) {
752 laddr = addr_buf;
753 af = sctp_get_af_specific(laddr->v4.sin_family);
754 if (!af) {
755 retval = -EINVAL;
756 goto out;
757 }
758
759 if (!sctp_assoc_lookup_laddr(asoc, laddr))
760 break;
761
762 addr_buf += af->sockaddr_len;
763 }
764 if (i < addrcnt)
765 continue;
766
767 /* Find one address in the association's bind address list
768 * that is not in the packed array of addresses. This is to
769 * make sure that we do not delete all the addresses in the
770 * association.
771 */
772 bp = &asoc->base.bind_addr;
773 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
774 addrcnt, sp);
775 if ((laddr == NULL) && (addrcnt == 1)) {
776 if (asoc->asconf_addr_del_pending)
777 continue;
778 asoc->asconf_addr_del_pending =
779 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
780 if (asoc->asconf_addr_del_pending == NULL) {
781 retval = -ENOMEM;
782 goto out;
783 }
784 asoc->asconf_addr_del_pending->sa.sa_family =
785 addrs->sa_family;
786 asoc->asconf_addr_del_pending->v4.sin_port =
787 htons(bp->port);
788 if (addrs->sa_family == AF_INET) {
789 struct sockaddr_in *sin;
790
791 sin = (struct sockaddr_in *)addrs;
792 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
793 } else if (addrs->sa_family == AF_INET6) {
794 struct sockaddr_in6 *sin6;
795
796 sin6 = (struct sockaddr_in6 *)addrs;
797 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
798 }
799
800 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
801 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
802 asoc->asconf_addr_del_pending);
803
804 asoc->src_out_of_asoc_ok = 1;
805 stored = 1;
806 goto skip_mkasconf;
807 }
808
809 if (laddr == NULL)
810 return -EINVAL;
811
812 /* We do not need RCU protection throughout this loop
813 * because this is done under a socket lock from the
814 * setsockopt call.
815 */
816 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
817 SCTP_PARAM_DEL_IP);
818 if (!chunk) {
819 retval = -ENOMEM;
820 goto out;
821 }
822
823 skip_mkasconf:
824 /* Reset use_as_src flag for the addresses in the bind address
825 * list that are to be deleted.
826 */
827 addr_buf = addrs;
828 for (i = 0; i < addrcnt; i++) {
829 laddr = addr_buf;
830 af = sctp_get_af_specific(laddr->v4.sin_family);
831 list_for_each_entry(saddr, &bp->address_list, list) {
832 if (sctp_cmp_addr_exact(&saddr->a, laddr))
833 saddr->state = SCTP_ADDR_DEL;
834 }
835 addr_buf += af->sockaddr_len;
836 }
837
838 /* Update the route and saddr entries for all the transports
839 * as some of the addresses in the bind address list are
840 * about to be deleted and cannot be used as source addresses.
841 */
842 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
843 transports) {
844 dst_release(transport->dst);
845 sctp_transport_route(transport, NULL,
846 sctp_sk(asoc->base.sk));
847 }
848
849 if (stored)
850 /* We don't need to transmit ASCONF */
851 continue;
852 retval = sctp_send_asconf(asoc, chunk);
853 }
854 out:
855 return retval;
856 }
857
858 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
859 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
860 {
861 struct sock *sk = sctp_opt2sk(sp);
862 union sctp_addr *addr;
863 struct sctp_af *af;
864
865 /* It is safe to write port space in caller. */
866 addr = &addrw->a;
867 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
868 af = sctp_get_af_specific(addr->sa.sa_family);
869 if (!af)
870 return -EINVAL;
871 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
872 return -EINVAL;
873
874 if (addrw->state == SCTP_ADDR_NEW)
875 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
876 else
877 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
878 }
879
880 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
881 *
882 * API 8.1
883 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
884 * int flags);
885 *
886 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
887 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
888 * or IPv6 addresses.
889 *
890 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
891 * Section 3.1.2 for this usage.
892 *
893 * addrs is a pointer to an array of one or more socket addresses. Each
894 * address is contained in its appropriate structure (i.e. struct
895 * sockaddr_in or struct sockaddr_in6) the family of the address type
896 * must be used to distinguish the address length (note that this
897 * representation is termed a "packed array" of addresses). The caller
898 * specifies the number of addresses in the array with addrcnt.
899 *
900 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
901 * -1, and sets errno to the appropriate error code.
902 *
903 * For SCTP, the port given in each socket address must be the same, or
904 * sctp_bindx() will fail, setting errno to EINVAL.
905 *
906 * The flags parameter is formed from the bitwise OR of zero or more of
907 * the following currently defined flags:
908 *
909 * SCTP_BINDX_ADD_ADDR
910 *
911 * SCTP_BINDX_REM_ADDR
912 *
913 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
914 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
915 * addresses from the association. The two flags are mutually exclusive;
916 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
917 * not remove all addresses from an association; sctp_bindx() will
918 * reject such an attempt with EINVAL.
919 *
920 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
921 * additional addresses with an endpoint after calling bind(). Or use
922 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
923 * socket is associated with so that no new association accepted will be
924 * associated with those addresses. If the endpoint supports dynamic
925 * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
926 * may cause the endpoint to send the appropriate message to the peer to
927 * change the peer's address lists.
928 *
929 * Adding and removing addresses from a connected association is
930 * optional functionality. Implementations that do not support this
931 * functionality should return EOPNOTSUPP.
932 *
933 * Basically do nothing but copy the addresses from user to kernel
934 * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
935 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
936 * from userspace.
937 *
938 * We don't use copy_from_user() for optimization: we first do the
939 * sanity checks (buffer size -fast- and access check-healthy
940 * pointer); if all of those succeed, then we can alloc the memory
941 * (expensive operation) needed to copy the data to kernel. Then we do
942 * the copying without checking the user space area
943 * (__copy_from_user()).
944 *
945 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
946 * it.
947 *
948 * sk The sk of the socket
949 * addrs The pointer to the addresses in user land
950 * addrssize Size of the addrs buffer
951 * op Operation to perform (add or remove, see the flags of
952 * sctp_bindx)
953 *
954 * Returns 0 if ok, <0 errno code on error.
955 */
956 static int sctp_setsockopt_bindx(struct sock *sk,
957 struct sockaddr __user *addrs,
958 int addrs_size, int op)
959 {
960 struct sockaddr *kaddrs;
961 int err;
962 int addrcnt = 0;
963 int walk_size = 0;
964 struct sockaddr *sa_addr;
965 void *addr_buf;
966 struct sctp_af *af;
967
968 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
969 __func__, sk, addrs, addrs_size, op);
970
971 if (unlikely(addrs_size <= 0))
972 return -EINVAL;
973
974 /* Check the user passed a healthy pointer. */
975 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
976 return -EFAULT;
977
978 /* Alloc space for the address array in kernel memory. */
979 kaddrs = kmalloc(addrs_size, GFP_KERNEL);
980 if (unlikely(!kaddrs))
981 return -ENOMEM;
982
983 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
984 kfree(kaddrs);
985 return -EFAULT;
986 }
987
988 /* Walk through the addrs buffer and count the number of addresses. */
989 addr_buf = kaddrs;
990 while (walk_size < addrs_size) {
991 if (walk_size + sizeof(sa_family_t) > addrs_size) {
992 kfree(kaddrs);
993 return -EINVAL;
994 }
995
996 sa_addr = addr_buf;
997 af = sctp_get_af_specific(sa_addr->sa_family);
998
999 /* If the address family is not supported or if this address
1000 * causes the address buffer to overflow return EINVAL.
1001 */
1002 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1003 kfree(kaddrs);
1004 return -EINVAL;
1005 }
1006 addrcnt++;
1007 addr_buf += af->sockaddr_len;
1008 walk_size += af->sockaddr_len;
1009 }
1010
1011 /* Do the work. */
1012 switch (op) {
1013 case SCTP_BINDX_ADD_ADDR:
1014 err = sctp_bindx_add(sk, kaddrs, addrcnt);
1015 if (err)
1016 goto out;
1017 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
1018 break;
1019
1020 case SCTP_BINDX_REM_ADDR:
1021 err = sctp_bindx_rem(sk, kaddrs, addrcnt);
1022 if (err)
1023 goto out;
1024 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
1025 break;
1026
1027 default:
1028 err = -EINVAL;
1029 break;
1030 }
1031
1032 out:
1033 kfree(kaddrs);
1034
1035 return err;
1036 }
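
/* A short user-space sketch of the sctp_bindx() call that this setsockopt
 * handler serves, assuming the lksctp-tools wrapper library; the address
 * and variable names are illustrative only:
 *
 *	struct sockaddr_in extra = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = inet_addr("192.0.2.1") },
 *	};
 *
 *	if (sctp_bindx(sd, (struct sockaddr *)&extra, 1,
 *		       SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 */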
1037
1038 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
1039 *
1040 * Common routine for handling connect() and sctp_connectx().
1041 * Connect will come in with just a single address.
1042 */
1043 static int __sctp_connect(struct sock *sk,
1044 struct sockaddr *kaddrs,
1045 int addrs_size,
1046 sctp_assoc_t *assoc_id)
1047 {
1048 struct net *net = sock_net(sk);
1049 struct sctp_sock *sp;
1050 struct sctp_endpoint *ep;
1051 struct sctp_association *asoc = NULL;
1052 struct sctp_association *asoc2;
1053 struct sctp_transport *transport;
1054 union sctp_addr to;
1055 struct sctp_af *af;
1056 sctp_scope_t scope;
1057 long timeo;
1058 int err = 0;
1059 int addrcnt = 0;
1060 int walk_size = 0;
1061 union sctp_addr *sa_addr = NULL;
1062 void *addr_buf;
1063 unsigned short port;
1064 unsigned int f_flags = 0;
1065
1066 sp = sctp_sk(sk);
1067 ep = sp->ep;
1068
1069 /* connect() cannot be done on a socket that is already in ESTABLISHED
1070 * state - UDP-style peeled off socket or a TCP-style socket that
1071 * is already connected.
1072 * It cannot be done even on a TCP-style listening socket.
1073 */
1074 if (sctp_sstate(sk, ESTABLISHED) ||
1075 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
1076 err = -EISCONN;
1077 goto out_free;
1078 }
1079
1080 /* Walk through the addrs buffer and count the number of addresses. */
1081 addr_buf = kaddrs;
1082 while (walk_size < addrs_size) {
1083 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1084 err = -EINVAL;
1085 goto out_free;
1086 }
1087
1088 sa_addr = addr_buf;
1089 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1090
1091 /* If the address family is not supported or if this address
1092 * causes the address buffer to overflow return EINVAL.
1093 */
1094 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1095 err = -EINVAL;
1096 goto out_free;
1097 }
1098
1099 port = ntohs(sa_addr->v4.sin_port);
1100
1101 /* Save current address so we can work with it */
1102 memcpy(&to, sa_addr, af->sockaddr_len);
1103
1104 err = sctp_verify_addr(sk, &to, af->sockaddr_len);
1105 if (err)
1106 goto out_free;
1107
1108 /* Make sure the destination port is correctly set
1109 * in all addresses.
1110 */
1111 if (asoc && asoc->peer.port && asoc->peer.port != port) {
1112 err = -EINVAL;
1113 goto out_free;
1114 }
1115
1116 /* Check if there already is a matching association on the
1117 * endpoint (other than the one created here).
1118 */
1119 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1120 if (asoc2 && asoc2 != asoc) {
1121 if (asoc2->state >= SCTP_STATE_ESTABLISHED)
1122 err = -EISCONN;
1123 else
1124 err = -EALREADY;
1125 goto out_free;
1126 }
1127
1128 /* If we could not find a matching association on the endpoint,
1129 * make sure that there is no peeled-off association matching
1130 * the peer address even on another socket.
1131 */
1132 if (sctp_endpoint_is_peeled_off(ep, &to)) {
1133 err = -EADDRNOTAVAIL;
1134 goto out_free;
1135 }
1136
1137 if (!asoc) {
1138 /* If a bind() or sctp_bindx() is not called prior to
1139 * an sctp_connectx() call, the system picks an
1140 * ephemeral port and will choose an address set
1141 * equivalent to binding with a wildcard address.
1142 */
1143 if (!ep->base.bind_addr.port) {
1144 if (sctp_autobind(sk)) {
1145 err = -EAGAIN;
1146 goto out_free;
1147 }
1148 } else {
1149 /*
1150 * If an unprivileged user inherits a 1-many
1151 * style socket with open associations on a
1152 * privileged port, it MAY be permitted to
1153 * accept new associations, but it SHOULD NOT
1154 * be permitted to open new associations.
1155 */
1156 if (ep->base.bind_addr.port < PROT_SOCK &&
1157 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1158 err = -EACCES;
1159 goto out_free;
1160 }
1161 }
1162
1163 scope = sctp_scope(&to);
1164 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1165 if (!asoc) {
1166 err = -ENOMEM;
1167 goto out_free;
1168 }
1169
1170 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1171 GFP_KERNEL);
1172 if (err < 0) {
1173 goto out_free;
1174 }
1175
1176 }
1177
1178 /* Prime the peer's transport structures. */
1179 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
1180 SCTP_UNKNOWN);
1181 if (!transport) {
1182 err = -ENOMEM;
1183 goto out_free;
1184 }
1185
1186 addrcnt++;
1187 addr_buf += af->sockaddr_len;
1188 walk_size += af->sockaddr_len;
1189 }
1190
1191 /* In case the user of sctp_connectx() wants an association
1192 * id back, assign one now.
1193 */
1194 if (assoc_id) {
1195 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1196 if (err < 0)
1197 goto out_free;
1198 }
1199
1200 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1201 if (err < 0) {
1202 goto out_free;
1203 }
1204
1205 /* Initialize sk's dport and daddr for getpeername() */
1206 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
1207 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1208 af->to_sk_daddr(sa_addr, sk);
1209 sk->sk_err = 0;
1210
1211 /* in-kernel sockets don't generally have a file allocated to them
1212 * if all they do is call sock_create_kern().
1213 */
1214 if (sk->sk_socket->file)
1215 f_flags = sk->sk_socket->file->f_flags;
1216
1217 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1218
1219 err = sctp_wait_for_connect(asoc, &timeo);
1220 if ((err == 0 || err == -EINPROGRESS) && assoc_id)
1221 *assoc_id = asoc->assoc_id;
1222
1223 /* Don't free association on exit. */
1224 asoc = NULL;
1225
1226 out_free:
1227 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
1228 __func__, asoc, kaddrs, err);
1229
1230 if (asoc) {
1231 /* sctp_primitive_ASSOCIATE may have added this association
1232 * to the hash table; try to unhash it, just in case. It's a noop
1233 * if it wasn't hashed, so we're safe.
1234 */
1235 sctp_unhash_established(asoc);
1236 sctp_association_free(asoc);
1237 }
1238 return err;
1239 }
1240
1241 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1242 *
1243 * API 8.9
1244 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1245 * sctp_assoc_t *asoc);
1246 *
1247 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1248 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
1249 * or IPv6 addresses.
1250 *
1251 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
1252 * Section 3.1.2 for this usage.
1253 *
1254 * addrs is a pointer to an array of one or more socket addresses. Each
1255 * address is contained in its appropriate structure (i.e. struct
1256 * sockaddr_in or struct sockaddr_in6) the family of the address type
1257 * must be used to distinguish the address length (note that this
1258 * representation is termed a "packed array" of addresses). The caller
1259 * specifies the number of addresses in the array with addrcnt.
1260 *
1261 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1262 * the association id of the new association. On failure, sctp_connectx()
1263 * returns -1, and sets errno to the appropriate error code. The assoc_id
1264 * is not touched by the kernel.
1265 *
1266 * For SCTP, the port given in each socket address must be the same, or
1267 * sctp_connectx() will fail, setting errno to EINVAL.
1268 *
1269 * An application can use sctp_connectx to initiate an association with
1270 * an endpoint that is multi-homed. Much like sctp_bindx() this call
1271 * allows a caller to specify multiple addresses at which a peer can be
1272 * reached. The way the SCTP stack uses the list of addresses to set up
1273 * the association is implementation dependent. This function only
1274 * specifies that the stack will try to make use of all the addresses in
1275 * the list when needed.
1276 *
1277 * Note that the list of addresses passed in is only used for setting up
1278 * the association. It does not necessarily equal the set of addresses
1279 * the peer uses for the resulting association. If the caller wants to
1280 * find out the set of peer addresses, it must use sctp_getpaddrs() to
1281 * retrieve them after the association has been set up.
1282 *
1283 * Basically do nothing but copy the addresses from user to kernel
1284 * land and invoke sctp_connectx(). This is used for tunneling
1285 * the sctp_connectx() request through sctp_setsockopt() from userspace.
1286 *
1287 * We don't use copy_from_user() for optimization: we first do the
1288 * sanity checks (buffer size -fast- and access check-healthy
1289 * pointer); if all of those succeed, then we can alloc the memory
1290 * (expensive operation) needed to copy the data to kernel. Then we do
1291 * the copying without checking the user space area
1292 * (__copy_from_user()).
1293 *
1294 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1295 * it.
1296 *
1297 * sk The sk of the socket
1298 * addrs The pointer to the addresses in user land
1299 * addrssize Size of the addrs buffer
1300 *
1301 * Returns >=0 if ok, <0 errno code on error.
1302 */
1303 static int __sctp_setsockopt_connectx(struct sock *sk,
1304 struct sockaddr __user *addrs,
1305 int addrs_size,
1306 sctp_assoc_t *assoc_id)
1307 {
1308 int err = 0;
1309 struct sockaddr *kaddrs;
1310
1311 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1312 __func__, sk, addrs, addrs_size);
1313
1314 if (unlikely(addrs_size <= 0))
1315 return -EINVAL;
1316
1317 /* Check the user passed a healthy pointer. */
1318 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
1319 return -EFAULT;
1320
1321 /* Alloc space for the address array in kernel memory. */
1322 kaddrs = kmalloc(addrs_size, GFP_KERNEL);
1323 if (unlikely(!kaddrs))
1324 return -ENOMEM;
1325
1326 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1327 err = -EFAULT;
1328 } else {
1329 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1330 }
1331
1332 kfree(kaddrs);
1333
1334 return err;
1335 }
1336
1337 /*
1338 * This is an older interface. It's kept for backward compatibility
1339 * with the option that doesn't provide an association id.
1340 */
1341 static int sctp_setsockopt_connectx_old(struct sock *sk,
1342 struct sockaddr __user *addrs,
1343 int addrs_size)
1344 {
1345 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1346 }
1347
1348 /*
1349 * New interface for the API. Since the API is done with a socket
1350 * option, to make it simple we feed back the association id as a return
1351 * indication to the call. Error is always negative and association id is
1352 * always positive.
1353 */
1354 static int sctp_setsockopt_connectx(struct sock *sk,
1355 struct sockaddr __user *addrs,
1356 int addrs_size)
1357 {
1358 sctp_assoc_t assoc_id = 0;
1359 int err = 0;
1360
1361 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1362
1363 if (err)
1364 return err;
1365 else
1366 return assoc_id;
1367 }
1368
1369 /*
1370 * New (hopefully final) interface for the API.
1371 * We use the sctp_getaddrs_old structure so that the user-space library
1372 * can avoid any unnecessary allocations. The only difference
1373 * is that we store the actual length of the address buffer into the
1374 * addrs_num structure member. That way we can re-use the existing
1375 * code.
1376 */
1377 #ifdef CONFIG_COMPAT
1378 struct compat_sctp_getaddrs_old {
1379 sctp_assoc_t assoc_id;
1380 s32 addr_num;
1381 compat_uptr_t addrs; /* struct sockaddr * */
1382 };
1383 #endif
1384
1385 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1386 char __user *optval,
1387 int __user *optlen)
1388 {
1389 struct sctp_getaddrs_old param;
1390 sctp_assoc_t assoc_id = 0;
1391 int err = 0;
1392
1393 #ifdef CONFIG_COMPAT
1394 if (is_compat_task()) {
1395 struct compat_sctp_getaddrs_old param32;
1396
1397 if (len < sizeof(param32))
1398 return -EINVAL;
1399 if (copy_from_user(&param32, optval, sizeof(param32)))
1400 return -EFAULT;
1401
1402 param.assoc_id = param32.assoc_id;
1403 param.addr_num = param32.addr_num;
1404 param.addrs = compat_ptr(param32.addrs);
1405 } else
1406 #endif
1407 {
1408 if (len < sizeof(param))
1409 return -EINVAL;
1410 if (copy_from_user(&param, optval, sizeof(param)))
1411 return -EFAULT;
1412 }
1413
1414 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1415 param.addrs, param.addr_num,
1416 &assoc_id);
1417 if (err == 0 || err == -EINPROGRESS) {
1418 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1419 return -EFAULT;
1420 if (put_user(sizeof(assoc_id), optlen))
1421 return -EFAULT;
1422 }
1423
1424 return err;
1425 }
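
/* A user-space sketch of the sctp_connectx() call served by the helpers
 * above, assuming a recent lksctp-tools wrapper (the four-argument form
 * that reports the association id); peer addresses are illustrative only:
 *
 *	struct sockaddr_in peers[2] = {
 *		{ .sin_family = AF_INET, .sin_port = htons(5000),
 *		  .sin_addr = { .s_addr = inet_addr("192.0.2.1") } },
 *		{ .sin_family = AF_INET, .sin_port = htons(5000),
 *		  .sin_addr = { .s_addr = inet_addr("198.51.100.1") } },
 *	};
 *	sctp_assoc_t aid = 0;
 *
 *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &aid) < 0)
 *		perror("sctp_connectx");
 */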
1426
1427 /* API 3.1.4 close() - UDP Style Syntax
1428 * Applications use close() to perform graceful shutdown (as described in
1429 * Section 10.1 of [SCTP]) on ALL the associations currently represented
1430 * by a UDP-style socket.
1431 *
1432 * The syntax is
1433 *
1434 * ret = close(int sd);
1435 *
1436 * sd - the socket descriptor of the associations to be closed.
1437 *
1438 * To gracefully shutdown a specific association represented by the
1439 * UDP-style socket, an application should use the sendmsg() call,
1440 * passing no user data, but including the appropriate flag in the
1441 * ancillary data (see Section xxxx).
1442 *
1443 * If sd in the close() call is a branched-off socket representing only
1444 * one association, the shutdown is performed on that association only.
1445 *
1446 * 4.1.6 close() - TCP Style Syntax
1447 *
1448 * Applications use close() to gracefully close down an association.
1449 *
1450 * The syntax is:
1451 *
1452 * int close(int sd);
1453 *
1454 * sd - the socket descriptor of the association to be closed.
1455 *
1456 * After an application calls close() on a socket descriptor, no further
1457 * socket operations will succeed on that descriptor.
1458 *
1459 * API 7.1.4 SO_LINGER
1460 *
1461 * An application using the TCP-style socket can use this option to
1462 * perform the SCTP ABORT primitive. The linger option structure is:
1463 *
1464 * struct linger {
1465 * int l_onoff; // option on/off
1466 * int l_linger; // linger time
1467 * };
1468 *
1469 * To enable the option, set l_onoff to 1. If the l_linger value is set
1470 * to 0, calling close() is the same as the ABORT primitive. If the
1471 * value is set to a negative value, the setsockopt() call will return
1472 * an error. If the value is set to a positive value linger_time, the
1473 * close() can be blocked for at most linger_time ms. If the graceful
1474 * shutdown phase does not finish during this period, close() will
1475 * return but the graceful shutdown phase continues in the system.
1476 */
1477 static void sctp_close(struct sock *sk, long timeout)
1478 {
1479 struct net *net = sock_net(sk);
1480 struct sctp_endpoint *ep;
1481 struct sctp_association *asoc;
1482 struct list_head *pos, *temp;
1483 unsigned int data_was_unread;
1484
1485 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1486
1487 lock_sock(sk);
1488 sk->sk_shutdown = SHUTDOWN_MASK;
1489 sk->sk_state = SCTP_SS_CLOSING;
1490
1491 ep = sctp_sk(sk)->ep;
1492
1493 /* Clean up any skbs sitting on the receive queue. */
1494 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1495 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1496
1497 /* Walk all associations on an endpoint. */
1498 list_for_each_safe(pos, temp, &ep->asocs) {
1499 asoc = list_entry(pos, struct sctp_association, asocs);
1500
1501 if (sctp_style(sk, TCP)) {
1502 /* A closed association can still be in the list if
1503 * it belongs to a TCP-style listening socket that is
1504 * not yet accepted. If so, free it. If not, send an
1505 * ABORT or SHUTDOWN based on the linger options.
1506 */
1507 if (sctp_state(asoc, CLOSED)) {
1508 sctp_unhash_established(asoc);
1509 sctp_association_free(asoc);
1510 continue;
1511 }
1512 }
1513
1514 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1515 !skb_queue_empty(&asoc->ulpq.reasm) ||
1516 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1517 struct sctp_chunk *chunk;
1518
1519 chunk = sctp_make_abort_user(asoc, NULL, 0);
1520 if (chunk)
1521 sctp_primitive_ABORT(net, asoc, chunk);
1522 } else
1523 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1524 }
1525
1526 /* On a TCP-style socket, block for at most linger_time if set. */
1527 if (sctp_style(sk, TCP) && timeout)
1528 sctp_wait_for_close(sk, timeout);
1529
1530 /* This will run the backlog queue. */
1531 release_sock(sk);
1532
1533 /* Supposedly, no process has access to the socket, but
1534 * the net layers still may.
1535 */
1536 local_bh_disable();
1537 bh_lock_sock(sk);
1538
1539 /* Hold the sock, since sk_common_release() will call sock_put()
1540 * and we have just a little more cleanup.
1541 */
1542 sock_hold(sk);
1543 sk_common_release(sk);
1544
1545 bh_unlock_sock(sk);
1546 local_bh_enable();
1547
1548 sock_put(sk);
1549
1550 SCTP_DBG_OBJCNT_DEC(sock);
1551 }
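
/* A user-space sketch of the SO_LINGER behaviour documented above: with
 * l_onoff set and l_linger zero, close() on a TCP-style SCTP socket maps
 * to the ABORT primitive rather than a graceful SHUTDOWN (variable names
 * are illustrative only):
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 0 };
 *
 *	setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(sd);
 */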
1552
1553 /* Handle EPIPE error. */
1554 static int sctp_error(struct sock *sk, int flags, int err)
1555 {
1556 if (err == -EPIPE)
1557 err = sock_error(sk) ? : -EPIPE;
1558 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
1559 send_sig(SIGPIPE, current, 0);
1560 return err;
1561 }
1562
1563 /* API 3.1.3 sendmsg() - UDP Style Syntax
1564 *
1565 * An application uses sendmsg() and recvmsg() calls to transmit data to
1566 * and receive data from its peer.
1567 *
1568 * ssize_t sendmsg(int socket, const struct msghdr *message,
1569 * int flags);
1570 *
1571 * socket - the socket descriptor of the endpoint.
1572 * message - pointer to the msghdr structure which contains a single
1573 * user message and possibly some ancillary data.
1574 *
1575 * See Section 5 for complete description of the data
1576 * structures.
1577 *
1578 * flags - flags sent or received with the user message, see Section
1579 * 5 for complete description of the flags.
1580 *
1581 * Note: This function could use a rewrite especially when explicit
1582 * connect support comes in.
1583 */
1584 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
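
/* A condensed user-space sketch of the UDP-style sendmsg() described above,
 * passing a struct sctp_sndrcvinfo as SCTP_SNDRCV ancillary data to select
 * the stream; the peer address, stream number and payload are illustrative
 * only:
 *
 *	struct sockaddr_in peer = { .sin_family = AF_INET,
 *				    .sin_port = htons(5000),
 *				    .sin_addr = { .s_addr = inet_addr("192.0.2.1") } };
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
 *	struct iovec iov = { .iov_base = (void *)"hello", .iov_len = 5 };
 *	struct msghdr msg = { .msg_name = &peer, .msg_namelen = sizeof(peer),
 *			      .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct sctp_sndrcvinfo *sinfo;
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDRCV;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(*sinfo));
 *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	sinfo->sinfo_stream = 1;
 *
 *	sendmsg(sd, &msg, 0);
 */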
1585
1586 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
1587
1588 static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1589 struct msghdr *msg, size_t msg_len)
1590 {
1591 struct net *net = sock_net(sk);
1592 struct sctp_sock *sp;
1593 struct sctp_endpoint *ep;
1594 struct sctp_association *new_asoc = NULL, *asoc = NULL;
1595 struct sctp_transport *transport, *chunk_tp;
1596 struct sctp_chunk *chunk;
1597 union sctp_addr to;
1598 struct sockaddr *msg_name = NULL;
1599 struct sctp_sndrcvinfo default_sinfo;
1600 struct sctp_sndrcvinfo *sinfo;
1601 struct sctp_initmsg *sinit;
1602 sctp_assoc_t associd = 0;
1603 sctp_cmsgs_t cmsgs = { NULL };
1604 int err;
1605 sctp_scope_t scope;
1606 long timeo;
1607 __u16 sinfo_flags = 0;
1608 struct sctp_datamsg *datamsg;
1609 int msg_flags = msg->msg_flags;
1610
1611 err = 0;
1612 sp = sctp_sk(sk);
1613 ep = sp->ep;
1614
1615 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
1616 msg, msg_len, ep);
1617
1618 /* We cannot send a message over a TCP-style listening socket. */
1619 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
1620 err = -EPIPE;
1621 goto out_nounlock;
1622 }
1623
1624 /* Parse out the SCTP CMSGs. */
1625 err = sctp_msghdr_parse(msg, &cmsgs);
1626 if (err) {
1627 pr_debug("%s: msghdr parse err:%x\n", __func__, err);
1628 goto out_nounlock;
1629 }
1630
1631 /* Fetch the destination address for this packet. This
1632 * address only selects the association--it is not necessarily
1633 * the address we will send to.
1634 * For a peeled-off socket, msg_name is ignored.
1635 */
1636 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
1637 int msg_namelen = msg->msg_namelen;
1638
1639 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
1640 msg_namelen);
1641 if (err)
1642 return err;
1643
1644 if (msg_namelen > sizeof(to))
1645 msg_namelen = sizeof(to);
1646 memcpy(&to, msg->msg_name, msg_namelen);
1647 msg_name = msg->msg_name;
1648 }
1649
1650 sinfo = cmsgs.info;
1651 sinit = cmsgs.init;
1652
1653 /* Did the user specify SNDRCVINFO? */
1654 if (sinfo) {
1655 sinfo_flags = sinfo->sinfo_flags;
1656 associd = sinfo->sinfo_assoc_id;
1657 }
1658
1659 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
1660 msg_len, sinfo_flags);
1661
1662 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
1663 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
1664 err = -EINVAL;
1665 goto out_nounlock;
1666 }
1667
1668 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero
1669 * length messages when SCTP_EOF|SCTP_ABORT is not set.
1670 * If SCTP_ABORT is set, the message length could be non zero with
1671 * the msg_iov set to the user abort reason.
1672 */
1673 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
1674 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
1675 err = -EINVAL;
1676 goto out_nounlock;
1677 }
1678
1679 /* If SCTP_ADDR_OVER is set, there must be an address
1680 * specified in msg_name.
1681 */
1682 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
1683 err = -EINVAL;
1684 goto out_nounlock;
1685 }
1686
1687 transport = NULL;
1688
1689 pr_debug("%s: about to look up association\n", __func__);
1690
1691 lock_sock(sk);
1692
1693 /* If a msg_name has been specified, assume this is to be used. */
1694 if (msg_name) {
1695 /* Look for a matching association on the endpoint. */
1696 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1697 if (!asoc) {
1698 /* If we could not find a matching association on the
1699 * endpoint, make sure that it is not a TCP-style
1700 * socket that already has an association or there is
1701 * no peeled-off association on another socket.
1702 */
1703 if ((sctp_style(sk, TCP) &&
1704 sctp_sstate(sk, ESTABLISHED)) ||
1705 sctp_endpoint_is_peeled_off(ep, &to)) {
1706 err = -EADDRNOTAVAIL;
1707 goto out_unlock;
1708 }
1709 }
1710 } else {
1711 asoc = sctp_id2assoc(sk, associd);
1712 if (!asoc) {
1713 err = -EPIPE;
1714 goto out_unlock;
1715 }
1716 }
1717
1718 if (asoc) {
1719 pr_debug("%s: just looked up association:%p\n", __func__, asoc);
1720
1721 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
1722 * socket that has an association in CLOSED state. This can
1723 * happen when an accepted socket has an association that is
1724 * already CLOSED.
1725 */
1726 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
1727 err = -EPIPE;
1728 goto out_unlock;
1729 }
1730
1731 if (sinfo_flags & SCTP_EOF) {
1732 pr_debug("%s: shutting down association:%p\n",
1733 __func__, asoc);
1734
1735 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1736 err = 0;
1737 goto out_unlock;
1738 }
1739 if (sinfo_flags & SCTP_ABORT) {
1740
1741 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1742 if (!chunk) {
1743 err = -ENOMEM;
1744 goto out_unlock;
1745 }
1746
1747 pr_debug("%s: aborting association:%p\n",
1748 __func__, asoc);
1749
1750 sctp_primitive_ABORT(net, asoc, chunk);
1751 err = 0;
1752 goto out_unlock;
1753 }
1754 }
1755
1756 /* Do we need to create the association? */
1757 if (!asoc) {
1758 pr_debug("%s: there is no association yet\n", __func__);
1759
1760 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
1761 err = -EINVAL;
1762 goto out_unlock;
1763 }
1764
1765 /* Check for invalid stream against the stream counts,
1766 * either the default or the user specified stream counts.
1767 */
1768 if (sinfo) {
1769 if (!sinit || !sinit->sinit_num_ostreams) {
1770 /* Check against the defaults. */
1771 if (sinfo->sinfo_stream >=
1772 sp->initmsg.sinit_num_ostreams) {
1773 err = -EINVAL;
1774 goto out_unlock;
1775 }
1776 } else {
1777 /* Check against the requested. */
1778 if (sinfo->sinfo_stream >=
1779 sinit->sinit_num_ostreams) {
1780 err = -EINVAL;
1781 goto out_unlock;
1782 }
1783 }
1784 }
1785
1786 /*
1787 * API 3.1.2 bind() - UDP Style Syntax
1788 * If a bind() or sctp_bindx() is not called prior to a
1789 * sendmsg() call that initiates a new association, the
1790 * system picks an ephemeral port and will choose an address
1791 * set equivalent to binding with a wildcard address.
1792 */
1793 if (!ep->base.bind_addr.port) {
1794 if (sctp_autobind(sk)) {
1795 err = -EAGAIN;
1796 goto out_unlock;
1797 }
1798 } else {
1799 /*
1800 * If an unprivileged user inherits a one-to-many
1801 * style socket with open associations on a privileged
1802 * port, it MAY be permitted to accept new associations,
1803 * but it SHOULD NOT be permitted to open new
1804 * associations.
1805 */
1806 if (ep->base.bind_addr.port < PROT_SOCK &&
1807 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1808 err = -EACCES;
1809 goto out_unlock;
1810 }
1811 }
1812
1813 scope = sctp_scope(&to);
1814 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1815 if (!new_asoc) {
1816 err = -ENOMEM;
1817 goto out_unlock;
1818 }
1819 asoc = new_asoc;
1820 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1821 if (err < 0) {
1822 err = -ENOMEM;
1823 goto out_free;
1824 }
1825
1826 /* If the SCTP_INIT ancillary data is specified, set all
1827 * the association init values accordingly.
1828 */
1829 if (sinit) {
1830 if (sinit->sinit_num_ostreams) {
1831 asoc->c.sinit_num_ostreams =
1832 sinit->sinit_num_ostreams;
1833 }
1834 if (sinit->sinit_max_instreams) {
1835 asoc->c.sinit_max_instreams =
1836 sinit->sinit_max_instreams;
1837 }
1838 if (sinit->sinit_max_attempts) {
1839 asoc->max_init_attempts
1840 = sinit->sinit_max_attempts;
1841 }
1842 if (sinit->sinit_max_init_timeo) {
1843 asoc->max_init_timeo =
1844 msecs_to_jiffies(sinit->sinit_max_init_timeo);
1845 }
1846 }
1847
1848 /* Prime the peer's transport structures. */
1849 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
1850 if (!transport) {
1851 err = -ENOMEM;
1852 goto out_free;
1853 }
1854 }
1855
1856 /* ASSERT: we have a valid association at this point. */
1857 pr_debug("%s: we have a valid association\n", __func__);
1858
1859 if (!sinfo) {
1860 /* If the user didn't specify SNDRCVINFO, make up one with
1861 * some defaults.
1862 */
1863 memset(&default_sinfo, 0, sizeof(default_sinfo));
1864 default_sinfo.sinfo_stream = asoc->default_stream;
1865 default_sinfo.sinfo_flags = asoc->default_flags;
1866 default_sinfo.sinfo_ppid = asoc->default_ppid;
1867 default_sinfo.sinfo_context = asoc->default_context;
1868 default_sinfo.sinfo_timetolive = asoc->default_timetolive;
1869 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
1870 sinfo = &default_sinfo;
1871 }
1872
1873 /* API 7.1.7, the sndbuf size per association bounds the
1874 * maximum size of data that can be sent in a single send call.
1875 */
1876 if (msg_len > sk->sk_sndbuf) {
1877 err = -EMSGSIZE;
1878 goto out_free;
1879 }
1880
1881 if (asoc->pmtu_pending)
1882 sctp_assoc_pending_pmtu(sk, asoc);
1883
1884 /* If fragmentation is disabled and the message length exceeds the
1885 * association fragmentation point, return EMSGSIZE. The I-D
1886 * does not specify what this error is, but this looks like
1887 * a great fit.
1888 */
1889 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
1890 err = -EMSGSIZE;
1891 goto out_free;
1892 }
1893
1894 /* Check for invalid stream. */
1895 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
1896 err = -EINVAL;
1897 goto out_free;
1898 }
1899
1900 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1901 if (!sctp_wspace(asoc)) {
1902 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1903 if (err)
1904 goto out_free;
1905 }
1906
1907 /* If an address is passed with the sendto/sendmsg call, it is used
1908 * to override the primary destination address in the TCP model, or
1909 * when SCTP_ADDR_OVER flag is set in the UDP model.
1910 */
1911 if ((sctp_style(sk, TCP) && msg_name) ||
1912 (sinfo_flags & SCTP_ADDR_OVER)) {
1913 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
1914 if (!chunk_tp) {
1915 err = -EINVAL;
1916 goto out_free;
1917 }
1918 } else
1919 chunk_tp = NULL;
1920
1921 /* Auto-connect, if we aren't connected already. */
1922 if (sctp_state(asoc, CLOSED)) {
1923 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1924 if (err < 0)
1925 goto out_free;
1926
1927 pr_debug("%s: we associated primitively\n", __func__);
1928 }
1929
1930 /* Break the message into multiple chunks of maximum size. */
1931 datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len);
1932 if (IS_ERR(datamsg)) {
1933 err = PTR_ERR(datamsg);
1934 goto out_free;
1935 }
1936
1937 /* Now send the (possibly) fragmented message. */
1938 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1939 sctp_chunk_hold(chunk);
1940
1941 /* Do accounting for the write space. */
1942 sctp_set_owner_w(chunk);
1943
1944 chunk->transport = chunk_tp;
1945 }
1946
1947 /* Send it to the lower layers. Note: all chunks
1948 * must either fail or succeed. The lower layer
1949 * works that way today. Keep it that way or this
1950 * breaks.
1951 */
1952 err = sctp_primitive_SEND(net, asoc, datamsg);
1953 /* Did the lower layer accept the chunk? */
1954 if (err) {
1955 sctp_datamsg_free(datamsg);
1956 goto out_free;
1957 }
1958
1959 pr_debug("%s: we sent primitively\n", __func__);
1960
1961 sctp_datamsg_put(datamsg);
1962 err = msg_len;
1963
1964 /* If we are already past ASSOCIATE, the lower
1965 * layers are responsible for association cleanup.
1966 */
1967 goto out_unlock;
1968
1969 out_free:
1970 if (new_asoc) {
1971 sctp_unhash_established(asoc);
1972 sctp_association_free(asoc);
1973 }
1974 out_unlock:
1975 release_sock(sk);
1976
1977 out_nounlock:
1978 return sctp_error(sk, msg_flags, err);
1979
1980 #if 0
1981 do_sock_err:
1982 if (msg_len)
1983 err = msg_len;
1984 else
1985 err = sock_error(sk);
1986 goto out;
1987
1988 do_interrupted:
1989 if (msg_len)
1990 err = msg_len;
1991 goto out;
1992 #endif /* 0 */
1993 }
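/* Editorial sketch (added by the editor, not part of the original source):
 * one plausible way userspace could exercise the send path above, using the
 * lksctp-tools helper sctp_sendmsg().  The helper, its argument order
 * (ppid, flags, stream, ttl, context) and <netinet/sctp.h> are assumptions
 * about the userspace library; error handling is omitted.
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in to = { .sin_family = AF_INET, .sin_port = htons(5000) };
 *	inet_pton(AF_INET, "192.0.2.1", &to.sin_addr);
 *
 *	Ordinary data on stream 1 (ppid, flags, ttl and context left at 0):
 *	sctp_sendmsg(fd, "hello", 5, (struct sockaddr *)&to, sizeof(to),
 *		     0, 0, 1, 0, 0);
 *
 *	A zero-length send carrying SCTP_EOF in the flags shuts the matching
 *	association down, which is the SCTP_EOF branch handled above:
 *	sctp_sendmsg(fd, NULL, 0, (struct sockaddr *)&to, sizeof(to),
 *		     0, SCTP_EOF, 0, 0, 0);
 */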
1994
1995 /* This is an extended version of skb_pull() that removes the data from the
1996  * start of an skb even when the data is spread across the list of skbs in the
1997  * frag_list.  len specifies the total amount of data that needs to be removed.
1998  * When 'len' bytes could be removed from the skb, it returns 0.
1999  * If 'len' exceeds the total skb length, it returns the number of bytes that
2000  * could not be removed.
2001 */
2002 static int sctp_skb_pull(struct sk_buff *skb, int len)
2003 {
2004 struct sk_buff *list;
2005 int skb_len = skb_headlen(skb);
2006 int rlen;
2007
2008 if (len <= skb_len) {
2009 __skb_pull(skb, len);
2010 return 0;
2011 }
2012 len -= skb_len;
2013 __skb_pull(skb, skb_len);
2014
2015 skb_walk_frags(skb, list) {
2016 rlen = sctp_skb_pull(list, len);
2017 skb->len -= (len-rlen);
2018 skb->data_len -= (len-rlen);
2019
2020 if (!rlen)
2021 return 0;
2022
2023 len = rlen;
2024 }
2025
2026 return len;
2027 }
2028
2029 /* API 3.1.3 recvmsg() - UDP Style Syntax
2030 *
2031 * ssize_t recvmsg(int socket, struct msghdr *message,
2032 * int flags);
2033 *
2034 * socket - the socket descriptor of the endpoint.
2035 * message - pointer to the msghdr structure which contains a single
2036 * user message and possibly some ancillary data.
2037 *
2038 * See Section 5 for complete description of the data
2039 * structures.
2040 *
2041 * flags - flags sent or received with the user message, see Section
2042 * 5 for complete description of the flags.
2043 */
2044 static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
2045
2046 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
2047 struct msghdr *msg, size_t len, int noblock,
2048 int flags, int *addr_len)
2049 {
2050 struct sctp_ulpevent *event = NULL;
2051 struct sctp_sock *sp = sctp_sk(sk);
2052 struct sk_buff *skb;
2053 int copied;
2054 int err = 0;
2055 int skb_len;
2056
2057 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
2058 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
2059 addr_len);
2060
2061 lock_sock(sk);
2062
2063 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
2064 err = -ENOTCONN;
2065 goto out;
2066 }
2067
2068 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
2069 if (!skb)
2070 goto out;
2071
2072 /* Get the total length of the skb including any skb's in the
2073 * frag_list.
2074 */
2075 skb_len = skb->len;
2076
2077 copied = skb_len;
2078 if (copied > len)
2079 copied = len;
2080
2081 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
2082
2083 event = sctp_skb2event(skb);
2084
2085 if (err)
2086 goto out_free;
2087
2088 sock_recv_ts_and_drops(msg, sk, skb);
2089 if (sctp_ulpevent_is_notification(event)) {
2090 msg->msg_flags |= MSG_NOTIFICATION;
2091 sp->pf->event_msgname(event, msg->msg_name, addr_len);
2092 } else {
2093 sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
2094 }
2095
2096 /* Check if we allow SCTP_SNDRCVINFO. */
2097 if (sp->subscribe.sctp_data_io_event)
2098 sctp_ulpevent_read_sndrcvinfo(event, msg);
2099 #if 0
2100 /* FIXME: we should be calling IP/IPv6 layers. */
2101 if (sk->sk_protinfo.af_inet.cmsg_flags)
2102 ip_cmsg_recv(msg, skb);
2103 #endif
2104
2105 err = copied;
2106
2107 /* If skb's length exceeds the user's buffer, update the skb and
2108 * push it back to the receive_queue so that the next call to
2109 * recvmsg() will return the remaining data. Don't set MSG_EOR.
2110 */
2111 if (skb_len > copied) {
2112 msg->msg_flags &= ~MSG_EOR;
2113 if (flags & MSG_PEEK)
2114 goto out_free;
2115 sctp_skb_pull(skb, copied);
2116 skb_queue_head(&sk->sk_receive_queue, skb);
2117
2118 goto out;
2119 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2120 (event->msg_flags & MSG_EOR))
2121 msg->msg_flags |= MSG_EOR;
2122 else
2123 msg->msg_flags &= ~MSG_EOR;
2124
2125 out_free:
2126 if (flags & MSG_PEEK) {
2127 /* Release the skb reference acquired after peeking the skb in
2128 * sctp_skb_recv_datagram().
2129 */
2130 kfree_skb(skb);
2131 } else {
2132 /* Free the event which includes releasing the reference to
2133 * the owner of the skb, freeing the skb and updating the
2134 * rwnd.
2135 */
2136 sctp_ulpevent_free(event);
2137 }
2138 out:
2139 release_sock(sk);
2140 return err;
2141 }
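/* Editorial sketch (not part of the original source): a minimal userspace
 * receive against the implementation above.  handle_notification() is a
 * hypothetical helper; everything else uses the standard socket API, and
 * error handling is omitted.
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *
 *	MSG_NOTIFICATION marks an event rather than user data (see
 *	sctp_ulpevent_is_notification() above); a cleared MSG_EOR means only
 *	part of the message fit into buf, so recvmsg() must be called again:
 *	if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION))
 *		handle_notification(buf, n);
 *	else if (n > 0 && !(msg.msg_flags & MSG_EOR))
 *		n = recvmsg(fd, &msg, 0);
 */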
2142
2143 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
2144 *
2145  * This option is an on/off flag.  If enabled, no SCTP message
2146  * fragmentation will be performed.  Instead, if a message being sent
2147  * exceeds the current PMTU size, the message will NOT be sent and
2148  * an error will be indicated to the user.
2149 */
2150 static int sctp_setsockopt_disable_fragments(struct sock *sk,
2151 char __user *optval,
2152 unsigned int optlen)
2153 {
2154 int val;
2155
2156 if (optlen < sizeof(int))
2157 return -EINVAL;
2158
2159 if (get_user(val, (int __user *)optval))
2160 return -EFAULT;
2161
2162 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
2163
2164 return 0;
2165 }
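/* Editorial sketch (not part of the original source): how userspace might
 * flip this flag.  IPPROTO_SCTP as the option level and <netinet/sctp.h>
 * are assumptions about the userspace environment; error handling omitted.
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS, &on, sizeof(on));
 *
 * With the flag set, a send larger than the association frag_point fails
 * with EMSGSIZE instead of being fragmented (see sctp_sendmsg() above).
 */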
2166
2167 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2168 unsigned int optlen)
2169 {
2170 struct sctp_association *asoc;
2171 struct sctp_ulpevent *event;
2172
2173 if (optlen > sizeof(struct sctp_event_subscribe))
2174 return -EINVAL;
2175 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2176 return -EFAULT;
2177
2178 /*
2179 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2180 * if there is no data to be sent or retransmit, the stack will
2181 * immediately send up this notification.
2182 */
2183 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2184 &sctp_sk(sk)->subscribe)) {
2185 asoc = sctp_id2assoc(sk, 0);
2186
2187 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2188 event = sctp_ulpevent_make_sender_dry_event(asoc,
2189 GFP_ATOMIC);
2190 if (!event)
2191 return -ENOMEM;
2192
2193 sctp_ulpq_tail_event(&asoc->ulpq, event);
2194 }
2195 }
2196
2197 return 0;
2198 }
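/* Editorial sketch (not part of the original source): subscribing to
 * per-message SCTP_SNDRCV ancillary data via SCTP_EVENTS.  Only the
 * sctp_data_io_event field is referenced in this file; the remaining fields
 * of struct sctp_event_subscribe are assumed from the UAPI header.  Error
 * handling omitted.
 *
 *	struct sctp_event_subscribe ev;
 *	memset(&ev, 0, sizeof(ev));
 *	ev.sctp_data_io_event = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 */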
2199
2200 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
2201 *
2202 * This socket option is applicable to the UDP-style socket only. When
2203 * set it will cause associations that are idle for more than the
2204 * specified number of seconds to automatically close. An association
2205  * being idle is defined as an association that has NOT sent or received
2206 * user data. The special value of '0' indicates that no automatic
2207 * close of any associations should be performed. The option expects an
2208 * integer defining the number of seconds of idle time before an
2209 * association is closed.
2210 */
2211 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2212 unsigned int optlen)
2213 {
2214 struct sctp_sock *sp = sctp_sk(sk);
2215 struct net *net = sock_net(sk);
2216
2217 /* Applicable to UDP-style socket only */
2218 if (sctp_style(sk, TCP))
2219 return -EOPNOTSUPP;
2220 if (optlen != sizeof(int))
2221 return -EINVAL;
2222 if (copy_from_user(&sp->autoclose, optval, optlen))
2223 return -EFAULT;
2224
2225 if (sp->autoclose > net->sctp.max_autoclose)
2226 sp->autoclose = net->sctp.max_autoclose;
2227
2228 return 0;
2229 }
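/* Editorial sketch (not part of the original source): setting SCTP_AUTOCLOSE
 * on a one-to-many socket.  Values above the per-net max_autoclose limit are
 * clamped, as the handler above shows.  Error handling omitted.
 *
 *	int secs = 30;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 */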
2230
2231 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2232 *
2233 * Applications can enable or disable heartbeats for any peer address of
2234 * an association, modify an address's heartbeat interval, force a
2235 * heartbeat to be sent immediately, and adjust the address's maximum
2236 * number of retransmissions sent before an address is considered
2237 * unreachable. The following structure is used to access and modify an
2238 * address's parameters:
2239 *
2240 * struct sctp_paddrparams {
2241 * sctp_assoc_t spp_assoc_id;
2242 * struct sockaddr_storage spp_address;
2243 * uint32_t spp_hbinterval;
2244 * uint16_t spp_pathmaxrxt;
2245 * uint32_t spp_pathmtu;
2246 * uint32_t spp_sackdelay;
2247 * uint32_t spp_flags;
2248 * };
2249 *
2250  * spp_assoc_id - (one-to-many style socket) This is filled in by the
2251  *                application and identifies the association for
2252 * this query.
2253 * spp_address - This specifies which address is of interest.
2254 * spp_hbinterval - This contains the value of the heartbeat interval,
2255 * in milliseconds. If a value of zero
2256 * is present in this field then no changes are to
2257 * be made to this parameter.
2258 * spp_pathmaxrxt - This contains the maximum number of
2259 * retransmissions before this address shall be
2260 * considered unreachable. If a value of zero
2261 * is present in this field then no changes are to
2262 * be made to this parameter.
2263 * spp_pathmtu - When Path MTU discovery is disabled the value
2264 * specified here will be the "fixed" path mtu.
2265 * Note that if the spp_address field is empty
2266 * then all associations on this address will
2267 * have this fixed path mtu set upon them.
2268 *
2269 * spp_sackdelay - When delayed sack is enabled, this value specifies
2270 * the number of milliseconds that sacks will be delayed
2271 * for. This value will apply to all addresses of an
2272 * association if the spp_address field is empty. Note
2273 * also, that if delayed sack is enabled and this
2274 * value is set to 0, no change is made to the last
2275 * recorded delayed sack timer value.
2276 *
2277 * spp_flags - These flags are used to control various features
2278 * on an association. The flag field may contain
2279 * zero or more of the following options.
2280 *
2281 * SPP_HB_ENABLE - Enable heartbeats on the
2282 * specified address. Note that if the address
2283 * field is empty all addresses for the association
2284 * have heartbeats enabled upon them.
2285 *
2286 * SPP_HB_DISABLE - Disable heartbeats on the
2287  * specified address. Note that if the address
2288 * field is empty all addresses for the association
2289 * will have their heartbeats disabled. Note also
2290 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2291 * mutually exclusive, only one of these two should
2292 * be specified. Enabling both fields will have
2293 * undetermined results.
2294 *
2295 * SPP_HB_DEMAND - Request a user initiated heartbeat
2296 * to be made immediately.
2297 *
2298  * SPP_HB_TIME_IS_ZERO - Specifies that the time for
2299  * the heartbeat delay is to be set to the value of 0
2300 * milliseconds.
2301 *
2302 * SPP_PMTUD_ENABLE - This field will enable PMTU
2303 * discovery upon the specified address. Note that
2304  * if the address field is empty then all addresses
2305  * on the association are affected.
2306 *
2307 * SPP_PMTUD_DISABLE - This field will disable PMTU
2308 * discovery upon the specified address. Note that
2309  * if the address field is empty then all addresses
2310  * on the association are affected. Note also that
2311 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2312 * exclusive. Enabling both will have undetermined
2313 * results.
2314 *
2315 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2316 * on delayed sack. The time specified in spp_sackdelay
2317 * is used to specify the sack delay for this address. Note
2318 * that if spp_address is empty then all addresses will
2319 * enable delayed sack and take on the sack delay
2320 * value specified in spp_sackdelay.
2321 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2322 * off delayed sack. If the spp_address field is blank then
2323 * delayed sack is disabled for the entire association. Note
2324 * also that this field is mutually exclusive to
2325 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2326 * results.
2327 */
2328 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2329 struct sctp_transport *trans,
2330 struct sctp_association *asoc,
2331 struct sctp_sock *sp,
2332 int hb_change,
2333 int pmtud_change,
2334 int sackdelay_change)
2335 {
2336 int error;
2337
2338 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2339 struct net *net = sock_net(trans->asoc->base.sk);
2340
2341 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2342 if (error)
2343 return error;
2344 }
2345
2346 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2347 * this field is ignored. Note also that a value of zero indicates
2348 * the current setting should be left unchanged.
2349 */
2350 if (params->spp_flags & SPP_HB_ENABLE) {
2351
2352 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2353 * set. This lets us use 0 value when this flag
2354 * is set.
2355 */
2356 if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
2357 params->spp_hbinterval = 0;
2358
2359 if (params->spp_hbinterval ||
2360 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
2361 if (trans) {
2362 trans->hbinterval =
2363 msecs_to_jiffies(params->spp_hbinterval);
2364 } else if (asoc) {
2365 asoc->hbinterval =
2366 msecs_to_jiffies(params->spp_hbinterval);
2367 } else {
2368 sp->hbinterval = params->spp_hbinterval;
2369 }
2370 }
2371 }
2372
2373 if (hb_change) {
2374 if (trans) {
2375 trans->param_flags =
2376 (trans->param_flags & ~SPP_HB) | hb_change;
2377 } else if (asoc) {
2378 asoc->param_flags =
2379 (asoc->param_flags & ~SPP_HB) | hb_change;
2380 } else {
2381 sp->param_flags =
2382 (sp->param_flags & ~SPP_HB) | hb_change;
2383 }
2384 }
2385
2386 /* When Path MTU discovery is disabled the value specified here will
2387 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2388 * include the flag SPP_PMTUD_DISABLE for this field to have any
2389 * effect).
2390 */
2391 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2392 if (trans) {
2393 trans->pathmtu = params->spp_pathmtu;
2394 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2395 } else if (asoc) {
2396 asoc->pathmtu = params->spp_pathmtu;
2397 sctp_frag_point(asoc, params->spp_pathmtu);
2398 } else {
2399 sp->pathmtu = params->spp_pathmtu;
2400 }
2401 }
2402
2403 if (pmtud_change) {
2404 if (trans) {
2405 int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
2406 (params->spp_flags & SPP_PMTUD_ENABLE);
2407 trans->param_flags =
2408 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2409 if (update) {
2410 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2411 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2412 }
2413 } else if (asoc) {
2414 asoc->param_flags =
2415 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
2416 } else {
2417 sp->param_flags =
2418 (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
2419 }
2420 }
2421
2422 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2423 * value of this field is ignored. Note also that a value of zero
2424 * indicates the current setting should be left unchanged.
2425 */
2426 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2427 if (trans) {
2428 trans->sackdelay =
2429 msecs_to_jiffies(params->spp_sackdelay);
2430 } else if (asoc) {
2431 asoc->sackdelay =
2432 msecs_to_jiffies(params->spp_sackdelay);
2433 } else {
2434 sp->sackdelay = params->spp_sackdelay;
2435 }
2436 }
2437
2438 if (sackdelay_change) {
2439 if (trans) {
2440 trans->param_flags =
2441 (trans->param_flags & ~SPP_SACKDELAY) |
2442 sackdelay_change;
2443 } else if (asoc) {
2444 asoc->param_flags =
2445 (asoc->param_flags & ~SPP_SACKDELAY) |
2446 sackdelay_change;
2447 } else {
2448 sp->param_flags =
2449 (sp->param_flags & ~SPP_SACKDELAY) |
2450 sackdelay_change;
2451 }
2452 }
2453
2454 /* Note that a value of zero indicates the current setting should be
2455 left unchanged.
2456 */
2457 if (params->spp_pathmaxrxt) {
2458 if (trans) {
2459 trans->pathmaxrxt = params->spp_pathmaxrxt;
2460 } else if (asoc) {
2461 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2462 } else {
2463 sp->pathmaxrxt = params->spp_pathmaxrxt;
2464 }
2465 }
2466
2467 return 0;
2468 }
2469
2470 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2471 char __user *optval,
2472 unsigned int optlen)
2473 {
2474 struct sctp_paddrparams params;
2475 struct sctp_transport *trans = NULL;
2476 struct sctp_association *asoc = NULL;
2477 struct sctp_sock *sp = sctp_sk(sk);
2478 int error;
2479 int hb_change, pmtud_change, sackdelay_change;
2480
2481 if (optlen != sizeof(struct sctp_paddrparams))
2482 return -EINVAL;
2483
2484 if (copy_from_user(&params, optval, optlen))
2485 return -EFAULT;
2486
2487 /* Validate flags and value parameters. */
2488 hb_change = params.spp_flags & SPP_HB;
2489 pmtud_change = params.spp_flags & SPP_PMTUD;
2490 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2491
2492 if (hb_change == SPP_HB ||
2493 pmtud_change == SPP_PMTUD ||
2494 sackdelay_change == SPP_SACKDELAY ||
2495 params.spp_sackdelay > 500 ||
2496 (params.spp_pathmtu &&
2497 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2498 return -EINVAL;
2499
2500 /* If an address other than INADDR_ANY is specified, and
2501 * no transport is found, then the request is invalid.
2502 */
2503 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2504 trans = sctp_addr_id2transport(sk, &params.spp_address,
2505 params.spp_assoc_id);
2506 if (!trans)
2507 return -EINVAL;
2508 }
2509
2510 /* Get association, if assoc_id != 0 and the socket is a one
2511 * to many style socket, and an association was not found, then
2512 * the id was invalid.
2513 */
2514 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
2515 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
2516 return -EINVAL;
2517
2518 /* Heartbeat demand can only be sent on a transport or
2519 * association, but not a socket.
2520 */
2521 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2522 return -EINVAL;
2523
2524 /* Process parameters. */
2525 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2526 hb_change, pmtud_change,
2527 sackdelay_change);
2528
2529 if (error)
2530 return error;
2531
2532 /* If changes are for association, also apply parameters to each
2533 * transport.
2534 */
2535 if (!trans && asoc) {
2536 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2537 transports) {
2538 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2539 hb_change, pmtud_change,
2540 sackdelay_change);
2541 }
2542 }
2543
2544 return 0;
2545 }
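/* Editorial sketch (not part of the original source): enabling heartbeats
 * with a 5 second interval (spp_hbinterval is in milliseconds) on every path
 * of an association, using the struct and flags documented above.  Leaving
 * spp_address as the zeroed wildcard makes the handler apply the change to
 * each transport.  Socket setup and error handling omitted.
 *
 *	struct sctp_paddrparams pp;
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id   = assoc_id;
 *	pp.spp_hbinterval = 5000;
 *	pp.spp_flags      = SPP_HB_ENABLE;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &pp, sizeof(pp));
 */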
2546
2547 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
2548 {
2549 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
2550 }
2551
2552 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
2553 {
2554 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
2555 }
2556
2557 /*
2558 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2559 *
2560  * This option will affect the way delayed acks are performed. This
2561 * option allows you to get or set the delayed ack time, in
2562 * milliseconds. It also allows changing the delayed ack frequency.
2563 * Changing the frequency to 1 disables the delayed sack algorithm. If
2564 * the assoc_id is 0, then this sets or gets the endpoints default
2565 * values. If the assoc_id field is non-zero, then the set or get
2566  * affects the specified association for the one to many model (the
2567 * assoc_id field is ignored by the one to one model). Note that if
2568 * sack_delay or sack_freq are 0 when setting this option, then the
2569 * current values will remain unchanged.
2570 *
2571 * struct sctp_sack_info {
2572 * sctp_assoc_t sack_assoc_id;
2573 * uint32_t sack_delay;
2574 * uint32_t sack_freq;
2575 * };
2576 *
2577  * sack_assoc_id - This parameter indicates which association the user
2578  * is performing an action upon. Note that if this field's value is
2579  * zero then the endpoint's default value is changed (affecting future
2580  * associations only).
2581 *
2582 * sack_delay - This parameter contains the number of milliseconds that
2583 * the user is requesting the delayed ACK timer be set to. Note that
2584 * this value is defined in the standard to be between 200 and 500
2585 * milliseconds.
2586 *
2587 * sack_freq - This parameter contains the number of packets that must
2588 * be received before a sack is sent without waiting for the delay
2589 * timer to expire. The default value for this is 2, setting this
2590 * value to 1 will disable the delayed sack algorithm.
2591 */
2592
2593 static int sctp_setsockopt_delayed_ack(struct sock *sk,
2594 char __user *optval, unsigned int optlen)
2595 {
2596 struct sctp_sack_info params;
2597 struct sctp_transport *trans = NULL;
2598 struct sctp_association *asoc = NULL;
2599 struct sctp_sock *sp = sctp_sk(sk);
2600
2601 if (optlen == sizeof(struct sctp_sack_info)) {
2602 if (copy_from_user(&params, optval, optlen))
2603 return -EFAULT;
2604
2605 if (params.sack_delay == 0 && params.sack_freq == 0)
2606 return 0;
2607 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2608 pr_warn_ratelimited(DEPRECATED
2609 "%s (pid %d) "
2610 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2611 "Use struct sctp_sack_info instead\n",
2612 current->comm, task_pid_nr(current));
2613 if (copy_from_user(&params, optval, optlen))
2614 return -EFAULT;
2615
2616 if (params.sack_delay == 0)
2617 params.sack_freq = 1;
2618 else
2619 params.sack_freq = 0;
2620 } else
2621 return -EINVAL;
2622
2623 /* Validate value parameter. */
2624 if (params.sack_delay > 500)
2625 return -EINVAL;
2626
2627 /* Get association, if sack_assoc_id != 0 and the socket is a one
2628 * to many style socket, and an association was not found, then
2629 * the id was invalid.
2630 */
2631 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2632 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2633 return -EINVAL;
2634
2635 if (params.sack_delay) {
2636 if (asoc) {
2637 asoc->sackdelay =
2638 msecs_to_jiffies(params.sack_delay);
2639 asoc->param_flags =
2640 sctp_spp_sackdelay_enable(asoc->param_flags);
2641 } else {
2642 sp->sackdelay = params.sack_delay;
2643 sp->param_flags =
2644 sctp_spp_sackdelay_enable(sp->param_flags);
2645 }
2646 }
2647
2648 if (params.sack_freq == 1) {
2649 if (asoc) {
2650 asoc->param_flags =
2651 sctp_spp_sackdelay_disable(asoc->param_flags);
2652 } else {
2653 sp->param_flags =
2654 sctp_spp_sackdelay_disable(sp->param_flags);
2655 }
2656 } else if (params.sack_freq > 1) {
2657 if (asoc) {
2658 asoc->sackfreq = params.sack_freq;
2659 asoc->param_flags =
2660 sctp_spp_sackdelay_enable(asoc->param_flags);
2661 } else {
2662 sp->sackfreq = params.sack_freq;
2663 sp->param_flags =
2664 sctp_spp_sackdelay_enable(sp->param_flags);
2665 }
2666 }
2667
2668 /* If change is for association, also apply to each transport. */
2669 if (asoc) {
2670 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2671 transports) {
2672 if (params.sack_delay) {
2673 trans->sackdelay =
2674 msecs_to_jiffies(params.sack_delay);
2675 trans->param_flags =
2676 sctp_spp_sackdelay_enable(trans->param_flags);
2677 }
2678 if (params.sack_freq == 1) {
2679 trans->param_flags =
2680 sctp_spp_sackdelay_disable(trans->param_flags);
2681 } else if (params.sack_freq > 1) {
2682 trans->sackfreq = params.sack_freq;
2683 trans->param_flags =
2684 sctp_spp_sackdelay_enable(trans->param_flags);
2685 }
2686 }
2687 }
2688
2689 return 0;
2690 }
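/* Editorial sketch (not part of the original source): using the current
 * struct sctp_sack_info form of SCTP_DELAYED_SACK (rather than the
 * deprecated sctp_assoc_value form warned about above) to set a 200 ms SACK
 * delay on the endpoint defaults.  Error handling omitted.
 *
 *	struct sctp_sack_info si;
 *	memset(&si, 0, sizeof(si));
 *	si.sack_assoc_id = 0;
 *	si.sack_delay    = 200;
 *	si.sack_freq     = 2;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));
 */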
2691
2692 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2693 *
2694 * Applications can specify protocol parameters for the default association
2695 * initialization. The option name argument to setsockopt() and getsockopt()
2696 * is SCTP_INITMSG.
2697 *
2698 * Setting initialization parameters is effective only on an unconnected
2699  * socket (for UDP-style sockets only future associations are affected
2700 * by the change). With TCP-style sockets, this option is inherited by
2701 * sockets derived from a listener socket.
2702 */
2703 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2704 {
2705 struct sctp_initmsg sinit;
2706 struct sctp_sock *sp = sctp_sk(sk);
2707
2708 if (optlen != sizeof(struct sctp_initmsg))
2709 return -EINVAL;
2710 if (copy_from_user(&sinit, optval, optlen))
2711 return -EFAULT;
2712
2713 if (sinit.sinit_num_ostreams)
2714 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2715 if (sinit.sinit_max_instreams)
2716 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2717 if (sinit.sinit_max_attempts)
2718 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2719 if (sinit.sinit_max_init_timeo)
2720 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2721
2722 return 0;
2723 }
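/* Editorial sketch (not part of the original source): requesting more
 * streams for future associations via SCTP_INITMSG.  Zero fields leave the
 * corresponding defaults untouched, matching the handler above.  Error
 * handling omitted.
 *
 *	struct sctp_initmsg im;
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams  = 10;
 *	im.sinit_max_instreams = 10;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
 */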
2724
2725 /*
2726 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2727 *
2728 * Applications that wish to use the sendto() system call may wish to
2729 * specify a default set of parameters that would normally be supplied
2730 * through the inclusion of ancillary data. This socket option allows
2731 * such an application to set the default sctp_sndrcvinfo structure.
2732 * The application that wishes to use this socket option simply passes
2733 * in to this call the sctp_sndrcvinfo structure defined in Section
2734 * 5.2.2) The input parameters accepted by this call include
2735 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2736 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2737 * to this call if the caller is using the UDP model.
2738 */
2739 static int sctp_setsockopt_default_send_param(struct sock *sk,
2740 char __user *optval,
2741 unsigned int optlen)
2742 {
2743 struct sctp_sndrcvinfo info;
2744 struct sctp_association *asoc;
2745 struct sctp_sock *sp = sctp_sk(sk);
2746
2747 if (optlen != sizeof(struct sctp_sndrcvinfo))
2748 return -EINVAL;
2749 if (copy_from_user(&info, optval, optlen))
2750 return -EFAULT;
2751
2752 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
2753 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
2754 return -EINVAL;
2755
2756 if (asoc) {
2757 asoc->default_stream = info.sinfo_stream;
2758 asoc->default_flags = info.sinfo_flags;
2759 asoc->default_ppid = info.sinfo_ppid;
2760 asoc->default_context = info.sinfo_context;
2761 asoc->default_timetolive = info.sinfo_timetolive;
2762 } else {
2763 sp->default_stream = info.sinfo_stream;
2764 sp->default_flags = info.sinfo_flags;
2765 sp->default_ppid = info.sinfo_ppid;
2766 sp->default_context = info.sinfo_context;
2767 sp->default_timetolive = info.sinfo_timetolive;
2768 }
2769
2770 return 0;
2771 }
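/* Editorial sketch (not part of the original source): setting a default
 * stream and PPID for sends that carry no SCTP_SNDRCV ancillary data, per
 * the option described above.  A non-zero sinfo_assoc_id would target a
 * single association on a one-to-many socket.  Error handling omitted.
 *
 *	struct sctp_sndrcvinfo def;
 *	memset(&def, 0, sizeof(def));
 *	def.sinfo_stream = 2;
 *	def.sinfo_ppid   = htonl(1234);
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM, &def, sizeof(def));
 */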
2772
2773 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2774 *
2775 * Requests that the local SCTP stack use the enclosed peer address as
2776 * the association primary. The enclosed address must be one of the
2777 * association peer's addresses.
2778 */
2779 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2780 unsigned int optlen)
2781 {
2782 struct sctp_prim prim;
2783 struct sctp_transport *trans;
2784
2785 if (optlen != sizeof(struct sctp_prim))
2786 return -EINVAL;
2787
2788 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
2789 return -EFAULT;
2790
2791 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
2792 if (!trans)
2793 return -EINVAL;
2794
2795 sctp_assoc_set_primary(trans->asoc, trans);
2796
2797 return 0;
2798 }
2799
2800 /*
2801 * 7.1.5 SCTP_NODELAY
2802 *
2803 * Turn on/off any Nagle-like algorithm. This means that packets are
2804 * generally sent as soon as possible and no unnecessary delays are
2805 * introduced, at the cost of more packets in the network. Expects an
2806 * integer boolean flag.
2807 */
2808 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2809 unsigned int optlen)
2810 {
2811 int val;
2812
2813 if (optlen < sizeof(int))
2814 return -EINVAL;
2815 if (get_user(val, (int __user *)optval))
2816 return -EFAULT;
2817
2818 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
2819 return 0;
2820 }
2821
2822 /*
2823 *
2824 * 7.1.1 SCTP_RTOINFO
2825 *
2826 * The protocol parameters used to initialize and bound retransmission
2827 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
2828 * and modify these parameters.
2829 * All parameters are time values, in milliseconds. A value of 0, when
2830 * modifying the parameters, indicates that the current value should not
2831 * be changed.
2832 *
2833 */
2834 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2835 {
2836 struct sctp_rtoinfo rtoinfo;
2837 struct sctp_association *asoc;
2838 unsigned long rto_min, rto_max;
2839 struct sctp_sock *sp = sctp_sk(sk);
2840
2841 if (optlen != sizeof (struct sctp_rtoinfo))
2842 return -EINVAL;
2843
2844 if (copy_from_user(&rtoinfo, optval, optlen))
2845 return -EFAULT;
2846
2847 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
2848
2849 /* Set the values to the specific association */
2850 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2851 return -EINVAL;
2852
2853 rto_max = rtoinfo.srto_max;
2854 rto_min = rtoinfo.srto_min;
2855
2856 if (rto_max)
2857 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2858 else
2859 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2860
2861 if (rto_min)
2862 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2863 else
2864 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2865
2866 if (rto_min > rto_max)
2867 return -EINVAL;
2868
2869 if (asoc) {
2870 if (rtoinfo.srto_initial != 0)
2871 asoc->rto_initial =
2872 msecs_to_jiffies(rtoinfo.srto_initial);
2873 asoc->rto_max = rto_max;
2874 asoc->rto_min = rto_min;
2875 } else {
2876 /* If there is no association or the association-id = 0
2877 * set the values to the endpoint.
2878 */
2879 if (rtoinfo.srto_initial != 0)
2880 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
2881 sp->rtoinfo.srto_max = rto_max;
2882 sp->rtoinfo.srto_min = rto_min;
2883 }
2884
2885 return 0;
2886 }
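/* Editorial sketch (not part of the original source): tightening the RTO
 * bounds on the endpoint defaults with SCTP_RTOINFO.  All values are in
 * milliseconds and zero means "leave unchanged", as documented above.
 * Error handling omitted.
 *
 *	struct sctp_rtoinfo rto;
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = 0;
 *	rto.srto_initial  = 300;
 *	rto.srto_min      = 100;
 *	rto.srto_max      = 1000;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 */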
2887
2888 /*
2889 *
2890 * 7.1.2 SCTP_ASSOCINFO
2891 *
2892 * This option is used to tune the maximum retransmission attempts
2893 * of the association.
2894 * Returns an error if the new association retransmission value is
2895 * greater than the sum of the retransmission value of the peer.
2896  * greater than the sum of the retransmission values of the peer.
2897 *
2898 */
2899 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
2900 {
2901
2902 struct sctp_assocparams assocparams;
2903 struct sctp_association *asoc;
2904
2905 if (optlen != sizeof(struct sctp_assocparams))
2906 return -EINVAL;
2907 if (copy_from_user(&assocparams, optval, optlen))
2908 return -EFAULT;
2909
2910 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
2911
2912 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
2913 return -EINVAL;
2914
2915 /* Set the values to the specific association */
2916 if (asoc) {
2917 if (assocparams.sasoc_asocmaxrxt != 0) {
2918 __u32 path_sum = 0;
2919 int paths = 0;
2920 struct sctp_transport *peer_addr;
2921
2922 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
2923 transports) {
2924 path_sum += peer_addr->pathmaxrxt;
2925 paths++;
2926 }
2927
2928 /* Only validate asocmaxrxt if we have more than
2929 * one path/transport. We do this because path
2930 * retransmissions are only counted when we have more
2931  * than one path.
2932 */
2933 if (paths > 1 &&
2934 assocparams.sasoc_asocmaxrxt > path_sum)
2935 return -EINVAL;
2936
2937 asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
2938 }
2939
2940 if (assocparams.sasoc_cookie_life != 0)
2941 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
2942 } else {
2943 /* Set the values to the endpoint */
2944 struct sctp_sock *sp = sctp_sk(sk);
2945
2946 if (assocparams.sasoc_asocmaxrxt != 0)
2947 sp->assocparams.sasoc_asocmaxrxt =
2948 assocparams.sasoc_asocmaxrxt;
2949 if (assocparams.sasoc_cookie_life != 0)
2950 sp->assocparams.sasoc_cookie_life =
2951 assocparams.sasoc_cookie_life;
2952 }
2953 return 0;
2954 }
2955
2956 /*
2957 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
2958 *
2959 * This socket option is a boolean flag which turns on or off mapped V4
2960 * addresses. If this option is turned on and the socket is type
2961 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
2962 * If this option is turned off, then no mapping will be done of V4
2963 * addresses and a user will receive both PF_INET6 and PF_INET type
2964 * addresses on the socket.
2965 */
2966 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
2967 {
2968 int val;
2969 struct sctp_sock *sp = sctp_sk(sk);
2970
2971 if (optlen < sizeof(int))
2972 return -EINVAL;
2973 if (get_user(val, (int __user *)optval))
2974 return -EFAULT;
2975 if (val)
2976 sp->v4mapped = 1;
2977 else
2978 sp->v4mapped = 0;
2979
2980 return 0;
2981 }
2982
2983 /*
2984 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
2985 * This option will get or set the maximum size to put in any outgoing
2986 * SCTP DATA chunk. If a message is larger than this size it will be
2987 * fragmented by SCTP into the specified size. Note that the underlying
2988 * SCTP implementation may fragment into smaller sized chunks when the
2989 * PMTU of the underlying association is smaller than the value set by
2990 * the user. The default value for this option is '0' which indicates
2991  * the user is NOT limiting fragmentation and only the PMTU will affect
2992 * SCTP's choice of DATA chunk size. Note also that values set larger
2993 * than the maximum size of an IP datagram will effectively let SCTP
2994 * control fragmentation (i.e. the same as setting this option to 0).
2995 *
2996 * The following structure is used to access and modify this parameter:
2997 *
2998 * struct sctp_assoc_value {
2999 * sctp_assoc_t assoc_id;
3000 * uint32_t assoc_value;
3001 * };
3002 *
3003 * assoc_id: This parameter is ignored for one-to-one style sockets.
3004 * For one-to-many style sockets this parameter indicates which
3005 * association the user is performing an action upon. Note that if
3006  * this field's value is zero then the endpoint's default value is
3007  * changed (affecting future associations only).
3008 * assoc_value: This parameter specifies the maximum size in bytes.
3009 */
3010 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3011 {
3012 struct sctp_assoc_value params;
3013 struct sctp_association *asoc;
3014 struct sctp_sock *sp = sctp_sk(sk);
3015 int val;
3016
3017 if (optlen == sizeof(int)) {
3018 pr_warn_ratelimited(DEPRECATED
3019 "%s (pid %d) "
3020 "Use of int in maxseg socket option.\n"
3021 "Use struct sctp_assoc_value instead\n",
3022 current->comm, task_pid_nr(current));
3023 if (copy_from_user(&val, optval, optlen))
3024 return -EFAULT;
3025 params.assoc_id = 0;
3026 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3027 if (copy_from_user(&params, optval, optlen))
3028 return -EFAULT;
3029 val = params.assoc_value;
3030 } else
3031 return -EINVAL;
3032
3033 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
3034 return -EINVAL;
3035
3036 asoc = sctp_id2assoc(sk, params.assoc_id);
3037 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
3038 return -EINVAL;
3039
3040 if (asoc) {
3041 if (val == 0) {
3042 val = asoc->pathmtu;
3043 val -= sp->pf->af->net_header_len;
3044 val -= sizeof(struct sctphdr) +
3045 sizeof(struct sctp_data_chunk);
3046 }
3047 asoc->user_frag = val;
3048 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
3049 } else {
3050 sp->user_frag = val;
3051 }
3052
3053 return 0;
3054 }
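/* Editorial sketch (not part of the original source): capping outgoing DATA
 * chunks at 1200 bytes using the struct sctp_assoc_value form (the bare-int
 * form is deprecated, as the handler above warns).  Error handling omitted.
 *
 *	struct sctp_assoc_value av;
 *	memset(&av, 0, sizeof(av));
 *	av.assoc_id    = 0;
 *	av.assoc_value = 1200;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 */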
3055
3056
3057 /*
3058 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3059 *
3060 * Requests that the peer mark the enclosed address as the association
3061 * primary. The enclosed address must be one of the association's
3062 * locally bound addresses. The following structure is used to make a
3063 * set primary request:
3064 */
3065 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3066 unsigned int optlen)
3067 {
3068 struct net *net = sock_net(sk);
3069 struct sctp_sock *sp;
3070 struct sctp_association *asoc = NULL;
3071 struct sctp_setpeerprim prim;
3072 struct sctp_chunk *chunk;
3073 struct sctp_af *af;
3074 int err;
3075
3076 sp = sctp_sk(sk);
3077
3078 if (!net->sctp.addip_enable)
3079 return -EPERM;
3080
3081 if (optlen != sizeof(struct sctp_setpeerprim))
3082 return -EINVAL;
3083
3084 if (copy_from_user(&prim, optval, optlen))
3085 return -EFAULT;
3086
3087 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
3088 if (!asoc)
3089 return -EINVAL;
3090
3091 if (!asoc->peer.asconf_capable)
3092 return -EPERM;
3093
3094 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
3095 return -EPERM;
3096
3097 if (!sctp_state(asoc, ESTABLISHED))
3098 return -ENOTCONN;
3099
3100 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
3101 if (!af)
3102 return -EINVAL;
3103
3104 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
3105 return -EADDRNOTAVAIL;
3106
3107 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
3108 return -EADDRNOTAVAIL;
3109
3110 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3111 chunk = sctp_make_asconf_set_prim(asoc,
3112 (union sctp_addr *)&prim.sspp_addr);
3113 if (!chunk)
3114 return -ENOMEM;
3115
3116 err = sctp_send_asconf(asoc, chunk);
3117
3118 pr_debug("%s: we set peer primary addr primitively\n", __func__);
3119
3120 return err;
3121 }
3122
3123 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
3124 unsigned int optlen)
3125 {
3126 struct sctp_setadaptation adaptation;
3127
3128 if (optlen != sizeof(struct sctp_setadaptation))
3129 return -EINVAL;
3130 if (copy_from_user(&adaptation, optval, optlen))
3131 return -EFAULT;
3132
3133 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
3134
3135 return 0;
3136 }
3137
3138 /*
3139 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3140 *
3141 * The context field in the sctp_sndrcvinfo structure is normally only
3142 * used when a failed message is retrieved holding the value that was
3143 * sent down on the actual send call. This option allows the setting of
3144 * a default context on an association basis that will be received on
3145 * reading messages from the peer. This is especially helpful in the
3146 * one-2-many model for an application to keep some reference to an
3147 * internal state machine that is processing messages on the
3148  * association. Note that the setting of this value only affects
3149  * received messages from the peer and does not affect the value that is
3150 * saved with outbound messages.
3151 */
3152 static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3153 unsigned int optlen)
3154 {
3155 struct sctp_assoc_value params;
3156 struct sctp_sock *sp;
3157 struct sctp_association *asoc;
3158
3159 if (optlen != sizeof(struct sctp_assoc_value))
3160 return -EINVAL;
3161 if (copy_from_user(&params, optval, optlen))
3162 return -EFAULT;
3163
3164 sp = sctp_sk(sk);
3165
3166 if (params.assoc_id != 0) {
3167 asoc = sctp_id2assoc(sk, params.assoc_id);
3168 if (!asoc)
3169 return -EINVAL;
3170 asoc->default_rcv_context = params.assoc_value;
3171 } else {
3172 sp->default_rcv_context = params.assoc_value;
3173 }
3174
3175 return 0;
3176 }
3177
3178 /*
3179 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3180 *
3181  * This option will at a minimum specify if the implementation is doing
3182 * fragmented interleave. Fragmented interleave, for a one to many
3183 * socket, is when subsequent calls to receive a message may return
3184 * parts of messages from different associations. Some implementations
3185 * may allow you to turn this value on or off. If so, when turned off,
3186 * no fragment interleave will occur (which will cause a head of line
3187 * blocking amongst multiple associations sharing the same one to many
3188 * socket). When this option is turned on, then each receive call may
3189 * come from a different association (thus the user must receive data
3190 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
3191  * association each receive belongs to).
3192 *
3193 * This option takes a boolean value. A non-zero value indicates that
3194 * fragmented interleave is on. A value of zero indicates that
3195 * fragmented interleave is off.
3196 *
3197 * Note that it is important that an implementation that allows this
3198 * option to be turned on, have it off by default. Otherwise an unaware
3199 * application using the one to many model may become confused and act
3200 * incorrectly.
3201 */
3202 static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3203 char __user *optval,
3204 unsigned int optlen)
3205 {
3206 int val;
3207
3208 if (optlen != sizeof(int))
3209 return -EINVAL;
3210 if (get_user(val, (int __user *)optval))
3211 return -EFAULT;
3212
3213 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
3214
3215 return 0;
3216 }
3217
3218 /*
3219 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3220 * (SCTP_PARTIAL_DELIVERY_POINT)
3221 *
3222 * This option will set or get the SCTP partial delivery point. This
3223 * point is the size of a message where the partial delivery API will be
3224 * invoked to help free up rwnd space for the peer. Setting this to a
3225 * lower value will cause partial deliveries to happen more often. The
3226 * calls argument is an integer that sets or gets the partial delivery
3227  * call's argument is an integer that sets or gets the partial delivery
3228 * this value larger than the socket receive buffer size.
3229 *
3230 * Note that any single message having a length smaller than or equal to
3231 * the SCTP partial delivery point will be delivered in one single read
3232 * call as long as the user provided buffer is large enough to hold the
3233 * message.
3234 */
3235 static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3236 char __user *optval,
3237 unsigned int optlen)
3238 {
3239 u32 val;
3240
3241 if (optlen != sizeof(u32))
3242 return -EINVAL;
3243 if (get_user(val, (int __user *)optval))
3244 return -EFAULT;
3245
3246 /* Note: We double the receive buffer from what the user sets
3247  * it to be; also, the initial rwnd is based on rcvbuf/2.
3248 */
3249 if (val > (sk->sk_rcvbuf >> 1))
3250 return -EINVAL;
3251
3252 sctp_sk(sk)->pd_point = val;
3253
3254 return 0; /* is this the right error code? */
3255 }
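/* Editorial sketch (not part of the original source): lowering the partial
 * delivery point to 4 KB.  The value must not exceed half of sk_rcvbuf, as
 * enforced above.  Error handling omitted.
 *
 *	uint32_t pd = 4096;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT, &pd, sizeof(pd));
 */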
3256
3257 /*
3258 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3259 *
3260 * This option will allow a user to change the maximum burst of packets
3261 * that can be emitted by this association. Note that the default value
3262 * is 4, and some implementations may restrict this setting so that it
3263 * can only be lowered.
3264 *
3265 * NOTE: This text doesn't seem right. Do this on a socket basis with
3266 * future associations inheriting the socket value.
3267 */
3268 static int sctp_setsockopt_maxburst(struct sock *sk,
3269 char __user *optval,
3270 unsigned int optlen)
3271 {
3272 struct sctp_assoc_value params;
3273 struct sctp_sock *sp;
3274 struct sctp_association *asoc;
3275 int val;
3276 int assoc_id = 0;
3277
3278 if (optlen == sizeof(int)) {
3279 pr_warn_ratelimited(DEPRECATED
3280 "%s (pid %d) "
3281 "Use of int in max_burst socket option deprecated.\n"
3282 "Use struct sctp_assoc_value instead\n",
3283 current->comm, task_pid_nr(current));
3284 if (copy_from_user(&val, optval, optlen))
3285 return -EFAULT;
3286 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3287 if (copy_from_user(&params, optval, optlen))
3288 return -EFAULT;
3289 val = params.assoc_value;
3290 assoc_id = params.assoc_id;
3291 } else
3292 return -EINVAL;
3293
3294 sp = sctp_sk(sk);
3295
3296 if (assoc_id != 0) {
3297 asoc = sctp_id2assoc(sk, assoc_id);
3298 if (!asoc)
3299 return -EINVAL;
3300 asoc->max_burst = val;
3301 } else
3302 sp->max_burst = val;
3303
3304 return 0;
3305 }
3306
3307 /*
3308 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3309 *
3310 * This set option adds a chunk type that the user is requesting to be
3311 * received only in an authenticated way. Changes to the list of chunks
3312 * will only effect future associations on the socket.
3313  * will only affect future associations on the socket.
3314 static int sctp_setsockopt_auth_chunk(struct sock *sk,
3315 char __user *optval,
3316 unsigned int optlen)
3317 {
3318 struct net *net = sock_net(sk);
3319 struct sctp_authchunk val;
3320
3321 if (!net->sctp.auth_enable)
3322 return -EACCES;
3323
3324 if (optlen != sizeof(struct sctp_authchunk))
3325 return -EINVAL;
3326 if (copy_from_user(&val, optval, optlen))
3327 return -EFAULT;
3328
3329 switch (val.sauth_chunk) {
3330 case SCTP_CID_INIT:
3331 case SCTP_CID_INIT_ACK:
3332 case SCTP_CID_SHUTDOWN_COMPLETE:
3333 case SCTP_CID_AUTH:
3334 return -EINVAL;
3335 }
3336
3337 /* add this chunk id to the endpoint */
3338 return sctp_auth_ep_add_chunkid(sctp_sk(sk)->ep, val.sauth_chunk);
3339 }
3340
3341 /*
3342 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3343 *
3344 * This option gets or sets the list of HMAC algorithms that the local
3345 * endpoint requires the peer to use.
3346 */
3347 static int sctp_setsockopt_hmac_ident(struct sock *sk,
3348 char __user *optval,
3349 unsigned int optlen)
3350 {
3351 struct net *net = sock_net(sk);
3352 struct sctp_hmacalgo *hmacs;
3353 u32 idents;
3354 int err;
3355
3356 if (!net->sctp.auth_enable)
3357 return -EACCES;
3358
3359 if (optlen < sizeof(struct sctp_hmacalgo))
3360 return -EINVAL;
3361
3362 hmacs = memdup_user(optval, optlen);
3363 if (IS_ERR(hmacs))
3364 return PTR_ERR(hmacs);
3365
3366 idents = hmacs->shmac_num_idents;
3367 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
3368 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
3369 err = -EINVAL;
3370 goto out;
3371 }
3372
3373 err = sctp_auth_ep_set_hmacs(sctp_sk(sk)->ep, hmacs);
3374 out:
3375 kfree(hmacs);
3376 return err;
3377 }
3378
3379 /*
3380 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3381 *
3382 * This option will set a shared secret key which is used to build an
3383 * association shared key.
3384 */
3385 static int sctp_setsockopt_auth_key(struct sock *sk,
3386 char __user *optval,
3387 unsigned int optlen)
3388 {
3389 struct net *net = sock_net(sk);
3390 struct sctp_authkey *authkey;
3391 struct sctp_association *asoc;
3392 int ret;
3393
3394 if (!net->sctp.auth_enable)
3395 return -EACCES;
3396
3397 if (optlen <= sizeof(struct sctp_authkey))
3398 return -EINVAL;
3399
3400 authkey = memdup_user(optval, optlen);
3401 if (IS_ERR(authkey))
3402 return PTR_ERR(authkey);
3403
3404 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3405 ret = -EINVAL;
3406 goto out;
3407 }
3408
3409 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
3410 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
3411 ret = -EINVAL;
3412 goto out;
3413 }
3414
3415 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
3416 out:
3417 kzfree(authkey);
3418 return ret;
3419 }
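/* Editorial sketch (not part of the original source): installing a shared
 * key with SCTP_AUTH_KEY.  Only sca_assoc_id and sca_keylength are
 * referenced in this file; sca_keynumber and the trailing sca_key[] bytes
 * are assumed from the UAPI definition of struct sctp_authkey.  Error
 * handling omitted.
 *
 *	const char secret[] = "not-a-real-key";
 *	size_t len = sizeof(struct sctp_authkey) + sizeof(secret);
 *	struct sctp_authkey *ak = calloc(1, len);
 *
 *	ak->sca_assoc_id  = 0;
 *	ak->sca_keynumber = 1;
 *	ak->sca_keylength = sizeof(secret);
 *	memcpy(ak->sca_key, secret, sizeof(secret));
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, len);
 *	free(ak);
 */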
3420
3421 /*
3422 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3423 *
3424 * This option will get or set the active shared key to be used to build
3425 * the association shared key.
3426 */
3427 static int sctp_setsockopt_active_key(struct sock *sk,
3428 char __user *optval,
3429 unsigned int optlen)
3430 {
3431 struct net *net = sock_net(sk);
3432 struct sctp_authkeyid val;
3433 struct sctp_association *asoc;
3434
3435 if (!net->sctp.auth_enable)
3436 return -EACCES;
3437
3438 if (optlen != sizeof(struct sctp_authkeyid))
3439 return -EINVAL;
3440 if (copy_from_user(&val, optval, optlen))
3441 return -EFAULT;
3442
3443 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3444 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3445 return -EINVAL;
3446
3447 return sctp_auth_set_active_key(sctp_sk(sk)->ep, asoc,
3448 val.scact_keynumber);
3449 }
3450
3451 /*
3452 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3453 *
3454 * This set option will delete a shared secret key from use.
3455 */
3456 static int sctp_setsockopt_del_key(struct sock *sk,
3457 char __user *optval,
3458 unsigned int optlen)
3459 {
3460 struct net *net = sock_net(sk);
3461 struct sctp_authkeyid val;
3462 struct sctp_association *asoc;
3463
3464 if (!net->sctp.auth_enable)
3465 return -EACCES;
3466
3467 if (optlen != sizeof(struct sctp_authkeyid))
3468 return -EINVAL;
3469 if (copy_from_user(&val, optval, optlen))
3470 return -EFAULT;
3471
3472 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3473 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3474 return -EINVAL;
3475
3476 return sctp_auth_del_key_id(sctp_sk(sk)->ep, asoc,
3477 val.scact_keynumber);
3478
3479 }
3480
3481 /*
3482 * 8.1.23 SCTP_AUTO_ASCONF
3483 *
3484 * This option will enable or disable the use of the automatic generation of
3485 * ASCONF chunks to add and delete addresses to an existing association. Note
3486 * that this option has two caveats namely: a) it only affects sockets that
3487 * are bound to all addresses available to the SCTP stack, and b) the system
3488 * administrator may have an overriding control that turns the ASCONF feature
3489 * off no matter what setting the socket option may have.
3490 * This option expects an integer boolean flag, where a non-zero value turns on
3491 * the option, and a zero value turns off the option.
3492  * Note: in this implementation the socket option overrides the default
3493  * parameter set by sysctl, as the FreeBSD implementation does.
3494 */
3495 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3496 unsigned int optlen)
3497 {
3498 int val;
3499 struct sctp_sock *sp = sctp_sk(sk);
3500
3501 if (optlen < sizeof(int))
3502 return -EINVAL;
3503 if (get_user(val, (int __user *)optval))
3504 return -EFAULT;
3505 if (!sctp_is_ep_boundall(sk) && val)
3506 return -EINVAL;
3507 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3508 return 0;
3509
3510 if (val == 0 && sp->do_auto_asconf) {
3511 list_del(&sp->auto_asconf_list);
3512 sp->do_auto_asconf = 0;
3513 } else if (val && !sp->do_auto_asconf) {
3514 list_add_tail(&sp->auto_asconf_list,
3515 &sock_net(sk)->sctp.auto_asconf_splist);
3516 sp->do_auto_asconf = 1;
3517 }
3518 return 0;
3519 }
3520
3521
3522 /*
3523 * SCTP_PEER_ADDR_THLDS
3524 *
3525 * This option allows us to alter the partially failed threshold for one or all
3526 * transports in an association. See Section 6.1 of:
3527 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3528 */
3529 static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3530 char __user *optval,
3531 unsigned int optlen)
3532 {
3533 struct sctp_paddrthlds val;
3534 struct sctp_transport *trans;
3535 struct sctp_association *asoc;
3536
3537 if (optlen < sizeof(struct sctp_paddrthlds))
3538 return -EINVAL;
3539 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3540 sizeof(struct sctp_paddrthlds)))
3541 return -EFAULT;
3542
3543
3544 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3545 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3546 if (!asoc)
3547 return -ENOENT;
3548 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
3549 transports) {
3550 if (val.spt_pathmaxrxt)
3551 trans->pathmaxrxt = val.spt_pathmaxrxt;
3552 trans->pf_retrans = val.spt_pathpfthld;
3553 }
3554
3555 if (val.spt_pathmaxrxt)
3556 asoc->pathmaxrxt = val.spt_pathmaxrxt;
3557 asoc->pf_retrans = val.spt_pathpfthld;
3558 } else {
3559 trans = sctp_addr_id2transport(sk, &val.spt_address,
3560 val.spt_assoc_id);
3561 if (!trans)
3562 return -ENOENT;
3563
3564 if (val.spt_pathmaxrxt)
3565 trans->pathmaxrxt = val.spt_pathmaxrxt;
3566 trans->pf_retrans = val.spt_pathpfthld;
3567 }
3568
3569 return 0;
3570 }
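/* A minimal userspace sketch (illustrative, with assumed values): setting
 * the path and potentially-failed thresholds on every transport of an
 * association. 'sd' and 'assoc_id' are assumed to exist; an AF_INET
 * INADDR_ANY spt_address selects the "all transports" branch handled
 * above. Assumes <netinet/sctp.h> and <string.h>.
 *
 *    struct sctp_paddrthlds thlds;
 *    struct sockaddr_in *any = (struct sockaddr_in *)&thlds.spt_address;
 *
 *    memset(&thlds, 0, sizeof(thlds));
 *    thlds.spt_assoc_id = assoc_id;
 *    any->sin_family = AF_INET;
 *    thlds.spt_pathmaxrxt = 5;
 *    thlds.spt_pathpfthld = 2;
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS, &thlds, sizeof(thlds));
 */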
3571
3572 /* API 6.2 setsockopt(), getsockopt()
3573 *
3574 * Applications use setsockopt() and getsockopt() to set or retrieve
3575 * socket options. Socket options are used to change the default
3576 * behavior of sockets calls. They are described in Section 7.
3577 *
3578 * The syntax is:
3579 *
3580 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
3581 * int __user *optlen);
3582 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
3583 * int optlen);
3584 *
3585 * sd - the socket descriptor.
3586 * level - set to IPPROTO_SCTP for all SCTP options.
3587 * optname - the option name.
3588 * optval - the buffer to store the value of the option.
3589 * optlen - the size of the buffer.
3590 */
3591 static int sctp_setsockopt(struct sock *sk, int level, int optname,
3592 char __user *optval, unsigned int optlen)
3593 {
3594 int retval = 0;
3595
3596 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
3597
3598 /* I can hardly begin to describe how wrong this is. This is
3599 * so broken as to be worse than useless. The API draft
3600 * REALLY is NOT helpful here... I am not convinced that the
3601 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
3602 * are at all well-founded.
3603 */
3604 if (level != SOL_SCTP) {
3605 struct sctp_af *af = sctp_sk(sk)->pf->af;
3606 retval = af->setsockopt(sk, level, optname, optval, optlen);
3607 goto out_nounlock;
3608 }
3609
3610 lock_sock(sk);
3611
3612 switch (optname) {
3613 case SCTP_SOCKOPT_BINDX_ADD:
3614 /* 'optlen' is the size of the addresses buffer. */
3615 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3616 optlen, SCTP_BINDX_ADD_ADDR);
3617 break;
3618
3619 case SCTP_SOCKOPT_BINDX_REM:
3620 /* 'optlen' is the size of the addresses buffer. */
3621 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3622 optlen, SCTP_BINDX_REM_ADDR);
3623 break;
3624
3625 case SCTP_SOCKOPT_CONNECTX_OLD:
3626 /* 'optlen' is the size of the addresses buffer. */
3627 retval = sctp_setsockopt_connectx_old(sk,
3628 (struct sockaddr __user *)optval,
3629 optlen);
3630 break;
3631
3632 case SCTP_SOCKOPT_CONNECTX:
3633 /* 'optlen' is the size of the addresses buffer. */
3634 retval = sctp_setsockopt_connectx(sk,
3635 (struct sockaddr __user *)optval,
3636 optlen);
3637 break;
3638
3639 case SCTP_DISABLE_FRAGMENTS:
3640 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
3641 break;
3642
3643 case SCTP_EVENTS:
3644 retval = sctp_setsockopt_events(sk, optval, optlen);
3645 break;
3646
3647 case SCTP_AUTOCLOSE:
3648 retval = sctp_setsockopt_autoclose(sk, optval, optlen);
3649 break;
3650
3651 case SCTP_PEER_ADDR_PARAMS:
3652 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3653 break;
3654
3655 case SCTP_DELAYED_SACK:
3656 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3657 break;
3658 case SCTP_PARTIAL_DELIVERY_POINT:
3659 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
3660 break;
3661
3662 case SCTP_INITMSG:
3663 retval = sctp_setsockopt_initmsg(sk, optval, optlen);
3664 break;
3665 case SCTP_DEFAULT_SEND_PARAM:
3666 retval = sctp_setsockopt_default_send_param(sk, optval,
3667 optlen);
3668 break;
3669 case SCTP_PRIMARY_ADDR:
3670 retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
3671 break;
3672 case SCTP_SET_PEER_PRIMARY_ADDR:
3673 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
3674 break;
3675 case SCTP_NODELAY:
3676 retval = sctp_setsockopt_nodelay(sk, optval, optlen);
3677 break;
3678 case SCTP_RTOINFO:
3679 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
3680 break;
3681 case SCTP_ASSOCINFO:
3682 retval = sctp_setsockopt_associnfo(sk, optval, optlen);
3683 break;
3684 case SCTP_I_WANT_MAPPED_V4_ADDR:
3685 retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
3686 break;
3687 case SCTP_MAXSEG:
3688 retval = sctp_setsockopt_maxseg(sk, optval, optlen);
3689 break;
3690 case SCTP_ADAPTATION_LAYER:
3691 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
3692 break;
3693 case SCTP_CONTEXT:
3694 retval = sctp_setsockopt_context(sk, optval, optlen);
3695 break;
3696 case SCTP_FRAGMENT_INTERLEAVE:
3697 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
3698 break;
3699 case SCTP_MAX_BURST:
3700 retval = sctp_setsockopt_maxburst(sk, optval, optlen);
3701 break;
3702 case SCTP_AUTH_CHUNK:
3703 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
3704 break;
3705 case SCTP_HMAC_IDENT:
3706 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
3707 break;
3708 case SCTP_AUTH_KEY:
3709 retval = sctp_setsockopt_auth_key(sk, optval, optlen);
3710 break;
3711 case SCTP_AUTH_ACTIVE_KEY:
3712 retval = sctp_setsockopt_active_key(sk, optval, optlen);
3713 break;
3714 case SCTP_AUTH_DELETE_KEY:
3715 retval = sctp_setsockopt_del_key(sk, optval, optlen);
3716 break;
3717 case SCTP_AUTO_ASCONF:
3718 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
3719 break;
3720 case SCTP_PEER_ADDR_THLDS:
3721 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
3722 break;
3723 default:
3724 retval = -ENOPROTOOPT;
3725 break;
3726 }
3727
3728 release_sock(sk);
3729
3730 out_nounlock:
3731 return retval;
3732 }
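/* For illustration only (not part of the kernel build): every SCTP-level
 * option dispatched above is set from userspace with level IPPROTO_SCTP
 * (SOL_SCTP); SCTP_NODELAY is used here as an example and 'sd' is an
 * assumed SCTP socket descriptor.
 *
 *    int one = 1;
 *
 *    if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &one, sizeof(one)) < 0)
 *        perror("setsockopt(SCTP_NODELAY)");
 */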
3733
3734 /* API 3.1.6 connect() - UDP Style Syntax
3735 *
3736 * An application may use the connect() call in the UDP model to initiate an
3737 * association without sending data.
3738 *
3739 * The syntax is:
3740 *
3741 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
3742 *
3743 * sd: the socket descriptor to have a new association added to.
3744 *
3745 * nam: the address structure (either struct sockaddr_in or struct
3746 * sockaddr_in6 defined in RFC2553 [7]).
3747 *
3748 * len: the size of the address.
3749 */
3750 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
3751 int addr_len)
3752 {
3753 int err = 0;
3754 struct sctp_af *af;
3755
3756 lock_sock(sk);
3757
3758 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
3759 addr, addr_len);
3760
3761 /* Validate addr_len before calling common connect/connectx routine. */
3762 af = sctp_get_af_specific(addr->sa_family);
3763 if (!af || addr_len < af->sockaddr_len) {
3764 err = -EINVAL;
3765 } else {
3766 /* Pass the correct addr len to the common routine (so it knows there
3767 * is only one address being passed).
3768 */
3769 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
3770 }
3771
3772 release_sock(sk);
3773 return err;
3774 }
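/* Userspace sketch (illustrative; the peer address and port are assumed
 * values, error handling omitted): initiating an association from a
 * one-to-many socket with connect(). Assumes <netinet/sctp.h>,
 * <arpa/inet.h> and <string.h>.
 *
 *    int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *    struct sockaddr_in peer;
 *
 *    memset(&peer, 0, sizeof(peer));
 *    peer.sin_family = AF_INET;
 *    peer.sin_port = htons(5000);
 *    inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *    connect(sd, (struct sockaddr *)&peer, sizeof(peer));
 */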
3775
3776 /* FIXME: Write comments. */
3777 static int sctp_disconnect(struct sock *sk, int flags)
3778 {
3779 return -EOPNOTSUPP; /* STUB */
3780 }
3781
3782 /* 4.1.4 accept() - TCP Style Syntax
3783 *
3784 * Applications use accept() call to remove an established SCTP
3785 * association from the accept queue of the endpoint. A new socket
3786 * descriptor will be returned from accept() to represent the newly
3787 * formed association.
3788 */
3789 static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
3790 {
3791 struct sctp_sock *sp;
3792 struct sctp_endpoint *ep;
3793 struct sock *newsk = NULL;
3794 struct sctp_association *asoc;
3795 long timeo;
3796 int error = 0;
3797
3798 lock_sock(sk);
3799
3800 sp = sctp_sk(sk);
3801 ep = sp->ep;
3802
3803 if (!sctp_style(sk, TCP)) {
3804 error = -EOPNOTSUPP;
3805 goto out;
3806 }
3807
3808 if (!sctp_sstate(sk, LISTENING)) {
3809 error = -EINVAL;
3810 goto out;
3811 }
3812
3813 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
3814
3815 error = sctp_wait_for_accept(sk, timeo);
3816 if (error)
3817 goto out;
3818
3819 /* We treat the list of associations on the endpoint as the accept
3820 * queue and pick the first association on the list.
3821 */
3822 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
3823
3824 newsk = sp->pf->create_accept_sk(sk, asoc);
3825 if (!newsk) {
3826 error = -ENOMEM;
3827 goto out;
3828 }
3829
3830 /* Populate the fields of the newsk from the oldsk and migrate the
3831 * asoc to the newsk.
3832 */
3833 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
3834
3835 out:
3836 release_sock(sk);
3837 *err = error;
3838 return newsk;
3839 }
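/* A short illustrative userspace sketch (not part of this file): a
 * one-to-one style server pulling an established association off the
 * accept queue; the returned descriptor represents exactly one
 * association. 'local' is an assumed, already initialized sockaddr_in.
 *
 *    int lsd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *
 *    bind(lsd, (struct sockaddr *)&local, sizeof(local));
 *    listen(lsd, 5);
 *    int csd = accept(lsd, NULL, NULL);
 */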
3840
3841 /* The SCTP ioctl handler. */
3842 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
3843 {
3844 int rc = -ENOTCONN;
3845
3846 lock_sock(sk);
3847
3848 /*
3849 * SEQPACKET-style sockets in LISTENING state are valid, for
3850 * SCTP, so only discard TCP-style sockets in LISTENING state.
3851 */
3852 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
3853 goto out;
3854
3855 switch (cmd) {
3856 case SIOCINQ: {
3857 struct sk_buff *skb;
3858 unsigned int amount = 0;
3859
3860 skb = skb_peek(&sk->sk_receive_queue);
3861 if (skb != NULL) {
3862 /*
3863 * We will only return the amount of this packet since
3864 * that is all that will be read.
3865 */
3866 amount = skb->len;
3867 }
3868 rc = put_user(amount, (int __user *)arg);
3869 break;
3870 }
3871 default:
3872 rc = -ENOIOCTLCMD;
3873 break;
3874 }
3875 out:
3876 release_sock(sk);
3877 return rc;
3878 }
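/* Userspace sketch (illustrative only): querying the length of the next
 * queued message with SIOCINQ, as implemented above. 'sd' is an assumed
 * SCTP socket; assumes <sys/ioctl.h>, <linux/sockios.h> and <stdio.h>.
 *
 *    int avail = 0;
 *
 *    if (ioctl(sd, SIOCINQ, &avail) == 0)
 *        printf("next message: %d bytes\n", avail);
 */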
3879
3880 /* This is the function which gets called during socket creation to
3881 * initialize the SCTP-specific portion of the sock.
3882 * The sock structure should already be zero-filled memory.
3883 */
3884 static int sctp_init_sock(struct sock *sk)
3885 {
3886 struct net *net = sock_net(sk);
3887 struct sctp_sock *sp;
3888
3889 pr_debug("%s: sk:%p\n", __func__, sk);
3890
3891 sp = sctp_sk(sk);
3892
3893 /* Initialize the SCTP per socket area. */
3894 switch (sk->sk_type) {
3895 case SOCK_SEQPACKET:
3896 sp->type = SCTP_SOCKET_UDP;
3897 break;
3898 case SOCK_STREAM:
3899 sp->type = SCTP_SOCKET_TCP;
3900 break;
3901 default:
3902 return -ESOCKTNOSUPPORT;
3903 }
3904
3905 /* Initialize default send parameters. These parameters can be
3906 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
3907 */
3908 sp->default_stream = 0;
3909 sp->default_ppid = 0;
3910 sp->default_flags = 0;
3911 sp->default_context = 0;
3912 sp->default_timetolive = 0;
3913
3914 sp->default_rcv_context = 0;
3915 sp->max_burst = net->sctp.max_burst;
3916
3917 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
3918
3919 /* Initialize default setup parameters. These parameters
3920 * can be modified with the SCTP_INITMSG socket option or
3921 * overridden by the SCTP_INIT CMSG.
3922 */
3923 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
3924 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
3925 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
3926 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
3927
3928 /* Initialize default RTO related parameters. These parameters can
3929 * be modified with the SCTP_RTOINFO socket option.
3930 */
3931 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
3932 sp->rtoinfo.srto_max = net->sctp.rto_max;
3933 sp->rtoinfo.srto_min = net->sctp.rto_min;
3934
3935 /* Initialize default association related parameters. These parameters
3936 * can be modified with the SCTP_ASSOCINFO socket option.
3937 */
3938 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
3939 sp->assocparams.sasoc_number_peer_destinations = 0;
3940 sp->assocparams.sasoc_peer_rwnd = 0;
3941 sp->assocparams.sasoc_local_rwnd = 0;
3942 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
3943
3944 /* Initialize default event subscriptions. By default, all the
3945 * options are off.
3946 */
3947 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
3948
3949 /* Default Peer Address Parameters. These defaults can
3950 * be modified via SCTP_PEER_ADDR_PARAMS
3951 */
3952 sp->hbinterval = net->sctp.hb_interval;
3953 sp->pathmaxrxt = net->sctp.max_retrans_path;
3954 sp->pathmtu = 0; /* allow default discovery */
3955 sp->sackdelay = net->sctp.sack_timeout;
3956 sp->sackfreq = 2;
3957 sp->param_flags = SPP_HB_ENABLE |
3958 SPP_PMTUD_ENABLE |
3959 SPP_SACKDELAY_ENABLE;
3960
3961 /* If enabled no SCTP message fragmentation will be performed.
3962 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
3963 */
3964 sp->disable_fragments = 0;
3965
3966 /* Enable Nagle algorithm by default. */
3967 sp->nodelay = 0;
3968
3969 /* Enable by default. */
3970 sp->v4mapped = 1;
3971
3972 /* Auto-close idle associations after the configured
3973 * number of seconds. A value of 0 disables this
3974 * feature. Configure through the SCTP_AUTOCLOSE socket option,
3975 * for UDP-style sockets only.
3976 */
3977 sp->autoclose = 0;
3978
3979 /* User specified fragmentation limit. */
3980 sp->user_frag = 0;
3981
3982 sp->adaptation_ind = 0;
3983
3984 sp->pf = sctp_get_pf_specific(sk->sk_family);
3985
3986 /* Control variables for partial data delivery. */
3987 atomic_set(&sp->pd_mode, 0);
3988 skb_queue_head_init(&sp->pd_lobby);
3989 sp->frag_interleave = 0;
3990
3991 /* Create a per socket endpoint structure. Even if we
3992 * change the data structure relationships, this may still
3993 * be useful for storing pre-connect address information.
3994 */
3995 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
3996 if (!sp->ep)
3997 return -ENOMEM;
3998
3999 sp->hmac = NULL;
4000
4001 sk->sk_destruct = sctp_destruct_sock;
4002
4003 SCTP_DBG_OBJCNT_INC(sock);
4004
4005 local_bh_disable();
4006 percpu_counter_inc(&sctp_sockets_allocated);
4007 sock_prot_inuse_add(net, sk->sk_prot, 1);
4008 if (net->sctp.default_auto_asconf) {
4009 list_add_tail(&sp->auto_asconf_list,
4010 &net->sctp.auto_asconf_splist);
4011 sp->do_auto_asconf = 1;
4012 } else
4013 sp->do_auto_asconf = 0;
4014 local_bh_enable();
4015
4016 return 0;
4017 }
4018
4019 /* Cleanup any SCTP per socket resources. */
4020 static void sctp_destroy_sock(struct sock *sk)
4021 {
4022 struct sctp_sock *sp;
4023
4024 pr_debug("%s: sk:%p\n", __func__, sk);
4025
4026 /* Release our hold on the endpoint. */
4027 sp = sctp_sk(sk);
4028 /* This can happen during socket initialization, so bail out
4029 * early, since the rest of the state below is not set up either.
4030 */
4031 if (sp->ep == NULL)
4032 return;
4033
4034 if (sp->do_auto_asconf) {
4035 sp->do_auto_asconf = 0;
4036 list_del(&sp->auto_asconf_list);
4037 }
4038 sctp_endpoint_free(sp->ep);
4039 local_bh_disable();
4040 percpu_counter_dec(&sctp_sockets_allocated);
4041 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4042 local_bh_enable();
4043 }
4044
4045 /* Triggered when there are no references on the socket anymore */
4046 static void sctp_destruct_sock(struct sock *sk)
4047 {
4048 struct sctp_sock *sp = sctp_sk(sk);
4049
4050 /* Free up the HMAC transform. */
4051 crypto_free_hash(sp->hmac);
4052
4053 inet_sock_destruct(sk);
4054 }
4055
4056 /* API 4.1.7 shutdown() - TCP Style Syntax
4057 * int shutdown(int socket, int how);
4058 *
4059 * sd - the socket descriptor of the association to be closed.
4060 * how - Specifies the type of shutdown. The values are
4061 * as follows:
4062 * SHUT_RD
4063 * Disables further receive operations. No SCTP
4064 * protocol action is taken.
4065 * SHUT_WR
4066 * Disables further send operations, and initiates
4067 * the SCTP shutdown sequence.
4068 * SHUT_RDWR
4069 * Disables further send and receive operations
4070 * and initiates the SCTP shutdown sequence.
4071 */
4072 static void sctp_shutdown(struct sock *sk, int how)
4073 {
4074 struct net *net = sock_net(sk);
4075 struct sctp_endpoint *ep;
4076 struct sctp_association *asoc;
4077
4078 if (!sctp_style(sk, TCP))
4079 return;
4080
4081 if (how & SEND_SHUTDOWN) {
4082 ep = sctp_sk(sk)->ep;
4083 if (!list_empty(&ep->asocs)) {
4084 asoc = list_entry(ep->asocs.next,
4085 struct sctp_association, asocs);
4086 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4087 }
4088 }
4089 }
4090
4091 /* 7.2.1 Association Status (SCTP_STATUS)
4092 *
4093 * Applications can retrieve current status information about an
4094 * association, including association state, peer receiver window size,
4095 * number of unacked data chunks, and number of data chunks pending
4096 * receipt. This information is read-only.
4097 */
4098 static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4099 char __user *optval,
4100 int __user *optlen)
4101 {
4102 struct sctp_status status;
4103 struct sctp_association *asoc = NULL;
4104 struct sctp_transport *transport;
4105 sctp_assoc_t associd;
4106 int retval = 0;
4107
4108 if (len < sizeof(status)) {
4109 retval = -EINVAL;
4110 goto out;
4111 }
4112
4113 len = sizeof(status);
4114 if (copy_from_user(&status, optval, len)) {
4115 retval = -EFAULT;
4116 goto out;
4117 }
4118
4119 associd = status.sstat_assoc_id;
4120 asoc = sctp_id2assoc(sk, associd);
4121 if (!asoc) {
4122 retval = -EINVAL;
4123 goto out;
4124 }
4125
4126 transport = asoc->peer.primary_path;
4127
4128 status.sstat_assoc_id = sctp_assoc2id(asoc);
4129 status.sstat_state = asoc->state;
4130 status.sstat_rwnd = asoc->peer.rwnd;
4131 status.sstat_unackdata = asoc->unack_data;
4132
4133 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4134 status.sstat_instrms = asoc->c.sinit_max_instreams;
4135 status.sstat_outstrms = asoc->c.sinit_num_ostreams;
4136 status.sstat_fragmentation_point = asoc->frag_point;
4137 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4138 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
4139 transport->af_specific->sockaddr_len);
4140 /* Map ipv4 address into v4-mapped-on-v6 address. */
4141 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4142 (union sctp_addr *)&status.sstat_primary.spinfo_address);
4143 status.sstat_primary.spinfo_state = transport->state;
4144 status.sstat_primary.spinfo_cwnd = transport->cwnd;
4145 status.sstat_primary.spinfo_srtt = transport->srtt;
4146 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
4147 status.sstat_primary.spinfo_mtu = transport->pathmtu;
4148
4149 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
4150 status.sstat_primary.spinfo_state = SCTP_ACTIVE;
4151
4152 if (put_user(len, optlen)) {
4153 retval = -EFAULT;
4154 goto out;
4155 }
4156
4157 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
4158 __func__, len, status.sstat_state, status.sstat_rwnd,
4159 status.sstat_assoc_id);
4160
4161 if (copy_to_user(optval, &status, len)) {
4162 retval = -EFAULT;
4163 goto out;
4164 }
4165
4166 out:
4167 return retval;
4168 }
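/* A minimal userspace sketch (illustrative; 'sd' and 'assoc_id' are
 * assumed): reading the current association status. On a one-to-one
 * socket sstat_assoc_id may simply be left at 0.
 *
 *    struct sctp_status st;
 *    socklen_t len = sizeof(st);
 *
 *    memset(&st, 0, sizeof(st));
 *    st.sstat_assoc_id = assoc_id;
 *    if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
 *        printf("state %d, peer rwnd %u\n", st.sstat_state, st.sstat_rwnd);
 */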
4169
4170
4171 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
4172 *
4173 * Applications can retrieve information about a specific peer address
4174 * of an association, including its reachability state, congestion
4175 * window, and retransmission timer values. This information is
4176 * read-only.
4177 */
4178 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
4179 char __user *optval,
4180 int __user *optlen)
4181 {
4182 struct sctp_paddrinfo pinfo;
4183 struct sctp_transport *transport;
4184 int retval = 0;
4185
4186 if (len < sizeof(pinfo)) {
4187 retval = -EINVAL;
4188 goto out;
4189 }
4190
4191 len = sizeof(pinfo);
4192 if (copy_from_user(&pinfo, optval, len)) {
4193 retval = -EFAULT;
4194 goto out;
4195 }
4196
4197 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
4198 pinfo.spinfo_assoc_id);
4199 if (!transport)
4200 return -EINVAL;
4201
4202 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4203 pinfo.spinfo_state = transport->state;
4204 pinfo.spinfo_cwnd = transport->cwnd;
4205 pinfo.spinfo_srtt = transport->srtt;
4206 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
4207 pinfo.spinfo_mtu = transport->pathmtu;
4208
4209 if (pinfo.spinfo_state == SCTP_UNKNOWN)
4210 pinfo.spinfo_state = SCTP_ACTIVE;
4211
4212 if (put_user(len, optlen)) {
4213 retval = -EFAULT;
4214 goto out;
4215 }
4216
4217 if (copy_to_user(optval, &pinfo, len)) {
4218 retval = -EFAULT;
4219 goto out;
4220 }
4221
4222 out:
4223 return retval;
4224 }
4225
4226 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
4227 *
4228 * This option is a on/off flag. If enabled no SCTP message
4229 * fragmentation will be performed. Instead if a message being sent
4230 * exceeds the current PMTU size, the message will NOT be sent and
4231 * instead a error will be indicated to the user.
4232 */
4233 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4234 char __user *optval, int __user *optlen)
4235 {
4236 int val;
4237
4238 if (len < sizeof(int))
4239 return -EINVAL;
4240
4241 len = sizeof(int);
4242 val = (sctp_sk(sk)->disable_fragments == 1);
4243 if (put_user(len, optlen))
4244 return -EFAULT;
4245 if (copy_to_user(optval, &val, len))
4246 return -EFAULT;
4247 return 0;
4248 }
4249
4250 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
4251 *
4252 * This socket option is used to specify various notifications and
4253 * ancillary data the user wishes to receive.
4254 */
4255 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4256 int __user *optlen)
4257 {
4258 if (len <= 0)
4259 return -EINVAL;
4260 if (len > sizeof(struct sctp_event_subscribe))
4261 len = sizeof(struct sctp_event_subscribe);
4262 if (put_user(len, optlen))
4263 return -EFAULT;
4264 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
4265 return -EFAULT;
4266 return 0;
4267 }
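/* Userspace sketch (illustrative only, assumed descriptor 'sd'):
 * subscribing to a subset of notifications with the companion setsockopt;
 * members left untouched after the memset stay disabled.
 *
 *    struct sctp_event_subscribe ev;
 *
 *    memset(&ev, 0, sizeof(ev));
 *    ev.sctp_data_io_event = 1;
 *    ev.sctp_association_event = 1;
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
 */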
4268
4269 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
4270 *
4271 * This socket option is applicable to the UDP-style socket only. When
4272 * set it will cause associations that are idle for more than the
4273 * specified number of seconds to automatically close. An association
4274 * being idle is defined as an association that has NOT sent or received
4275 * user data. The special value of '0' indicates that no automatic
4276 * close of any associations should be performed. The option expects an
4277 * integer defining the number of seconds of idle time before an
4278 * association is closed.
4279 */
4280 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
4281 {
4282 /* Applicable to UDP-style socket only */
4283 if (sctp_style(sk, TCP))
4284 return -EOPNOTSUPP;
4285 if (len < sizeof(int))
4286 return -EINVAL;
4287 len = sizeof(int);
4288 if (put_user(len, optlen))
4289 return -EFAULT;
4290 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
4291 return -EFAULT;
4292 return 0;
4293 }
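/* A minimal illustrative userspace sketch (not part of this file): asking
 * a one-to-many socket 'sd' (assumed) to close associations idle for 30
 * seconds, then reading the value back.
 *
 *    int secs = 30;
 *    socklen_t len = sizeof(secs);
 *
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 *    getsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, &len);
 */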
4294
4295 /* Helper routine to branch off an association to a new socket. */
4296 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4297 {
4298 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4299 struct socket *sock;
4300 struct sctp_af *af;
4301 int err = 0;
4302
4303 if (!asoc)
4304 return -EINVAL;
4305
4306 /* An association cannot be branched off from an already peeled-off
4307 * socket, nor is this supported for tcp style sockets.
4308 */
4309 if (!sctp_style(sk, UDP))
4310 return -EINVAL;
4311
4312 /* Create a new socket. */
4313 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
4314 if (err < 0)
4315 return err;
4316
4317 sctp_copy_sock(sock->sk, sk, asoc);
4318
4319 /* Make peeled-off sockets more like 1-1 accepted sockets.
4320 * Set the daddr and initialize id to something more random
4321 */
4322 af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
4323 af->to_sk_daddr(&asoc->peer.primary_addr, sk);
4324
4325 /* Populate the fields of the newsk from the oldsk and migrate the
4326 * asoc to the newsk.
4327 */
4328 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
4329
4330 *sockp = sock;
4331
4332 return err;
4333 }
4334 EXPORT_SYMBOL(sctp_do_peeloff);
4335
4336 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
4337 {
4338 sctp_peeloff_arg_t peeloff;
4339 struct socket *newsock;
4340 struct file *newfile;
4341 int retval = 0;
4342
4343 if (len < sizeof(sctp_peeloff_arg_t))
4344 return -EINVAL;
4345 len = sizeof(sctp_peeloff_arg_t);
4346 if (copy_from_user(&peeloff, optval, len))
4347 return -EFAULT;
4348
4349 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
4350 if (retval < 0)
4351 goto out;
4352
4353 /* Map the socket to an unused fd that can be returned to the user. */
4354 retval = get_unused_fd_flags(0);
4355 if (retval < 0) {
4356 sock_release(newsock);
4357 goto out;
4358 }
4359
4360 newfile = sock_alloc_file(newsock, 0, NULL);
4361 if (unlikely(IS_ERR(newfile))) {
4362 put_unused_fd(retval);
4363 sock_release(newsock);
4364 return PTR_ERR(newfile);
4365 }
4366
4367 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
4368 retval);
4369
4370 /* Return the fd mapped to the new socket. */
4371 if (put_user(len, optlen)) {
4372 fput(newfile);
4373 put_unused_fd(retval);
4374 return -EFAULT;
4375 }
4376 peeloff.sd = retval;
4377 if (copy_to_user(optval, &peeloff, len)) {
4378 fput(newfile);
4379 put_unused_fd(retval);
4380 return -EFAULT;
4381 }
4382 fd_install(retval, newfile);
4383 out:
4384 return retval;
4385 }
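/* Userspace sketch (illustrative; 'sd', 'assoc_id' and 'newsd' are
 * assumed): branching one association off a one-to-many socket into its
 * own one-to-one style descriptor. 'assoc_id' would typically come from
 * an SCTP_ASSOC_CHANGE notification.
 *
 *    sctp_peeloff_arg_t peeloff = { .associd = assoc_id, .sd = 0 };
 *    socklen_t len = sizeof(peeloff);
 *
 *    if (getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
 *                   &peeloff, &len) == 0)
 *        newsd = peeloff.sd;
 */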
4386
4387 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
4388 *
4389 * Applications can enable or disable heartbeats for any peer address of
4390 * an association, modify an address's heartbeat interval, force a
4391 * heartbeat to be sent immediately, and adjust the address's maximum
4392 * number of retransmissions sent before an address is considered
4393 * unreachable. The following structure is used to access and modify an
4394 * address's parameters:
4395 *
4396 * struct sctp_paddrparams {
4397 * sctp_assoc_t spp_assoc_id;
4398 * struct sockaddr_storage spp_address;
4399 * uint32_t spp_hbinterval;
4400 * uint16_t spp_pathmaxrxt;
4401 * uint32_t spp_pathmtu;
4402 * uint32_t spp_sackdelay;
4403 * uint32_t spp_flags;
4404 * };
4405 *
4406 * spp_assoc_id - (one-to-many style socket) This is filled in by the
4407 * application, and identifies the association for
4408 * this query.
4409 * spp_address - This specifies which address is of interest.
4410 * spp_hbinterval - This contains the value of the heartbeat interval,
4411 * in milliseconds. If a value of zero
4412 * is present in this field then no changes are to
4413 * be made to this parameter.
4414 * spp_pathmaxrxt - This contains the maximum number of
4415 * retransmissions before this address shall be
4416 * considered unreachable. If a value of zero
4417 * is present in this field then no changes are to
4418 * be made to this parameter.
4419 * spp_pathmtu - When Path MTU discovery is disabled the value
4420 * specified here will be the "fixed" path mtu.
4421 * Note that if the spp_address field is empty
4422 * then all associations on this address will
4423 * have this fixed path mtu set upon them.
4424 *
4425 * spp_sackdelay - When delayed sack is enabled, this value specifies
4426 * the number of milliseconds that sacks will be delayed
4427 * for. This value will apply to all addresses of an
4428 * association if the spp_address field is empty. Note
4429 * also, that if delayed sack is enabled and this
4430 * value is set to 0, no change is made to the last
4431 * recorded delayed sack timer value.
4432 *
4433 * spp_flags - These flags are used to control various features
4434 * on an association. The flag field may contain
4435 * zero or more of the following options.
4436 *
4437 * SPP_HB_ENABLE - Enable heartbeats on the
4438 * specified address. Note that if the address
4439 * field is empty all addresses for the association
4440 * have heartbeats enabled upon them.
4441 *
4442 * SPP_HB_DISABLE - Disable heartbeats on the
4443 * specified address. Note that if the address
4444 * field is empty all addresses for the association
4445 * will have their heartbeats disabled. Note also
4446 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
4447 * mutually exclusive, only one of these two should
4448 * be specified. Enabling both fields will have
4449 * undetermined results.
4450 *
4451 * SPP_HB_DEMAND - Request a user initiated heartbeat
4452 * to be made immediately.
4453 *
4454 * SPP_PMTUD_ENABLE - This field will enable PMTU
4455 * discovery upon the specified address. Note that
4456 * if the address field is empty then all addresses
4457 * on the association are affected.
4458 *
4459 * SPP_PMTUD_DISABLE - This field will disable PMTU
4460 * discovery upon the specified address. Note that
4461 * if the address field is empty then all addresses
4462 * on the association are affected. Note also that
4463 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
4464 * exclusive. Enabling both will have undetermined
4465 * results.
4466 *
4467 * SPP_SACKDELAY_ENABLE - Setting this flag turns
4468 * on delayed sack. The time specified in spp_sackdelay
4469 * is used to specify the sack delay for this address. Note
4470 * that if spp_address is empty then all addresses will
4471 * enable delayed sack and take on the sack delay
4472 * value specified in spp_sackdelay.
4473 * SPP_SACKDELAY_DISABLE - Setting this flag turns
4474 * off delayed sack. If the spp_address field is blank then
4475 * delayed sack is disabled for the entire association. Note
4476 * also that this field is mutually exclusive to
4477 * SPP_SACKDELAY_ENABLE, setting both will have undefined
4478 * results.
4479 */
4480 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
4481 char __user *optval, int __user *optlen)
4482 {
4483 struct sctp_paddrparams params;
4484 struct sctp_transport *trans = NULL;
4485 struct sctp_association *asoc = NULL;
4486 struct sctp_sock *sp = sctp_sk(sk);
4487
4488 if (len < sizeof(struct sctp_paddrparams))
4489 return -EINVAL;
4490 len = sizeof(struct sctp_paddrparams);
4491 if (copy_from_user(&params, optval, len))
4492 return -EFAULT;
4493
4494 /* If an address other than INADDR_ANY is specified, and
4495 * no transport is found, then the request is invalid.
4496 */
4497 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
4498 trans = sctp_addr_id2transport(sk, &params.spp_address,
4499 params.spp_assoc_id);
4500 if (!trans) {
4501 pr_debug("%s: failed no transport\n", __func__);
4502 return -EINVAL;
4503 }
4504 }
4505
4506 /* Get association, if assoc_id != 0 and the socket is a one
4507 * to many style socket, and an association was not found, then
4508 * the id was invalid.
4509 */
4510 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
4511 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
4512 pr_debug("%s: failed no association\n", __func__);
4513 return -EINVAL;
4514 }
4515
4516 if (trans) {
4517 /* Fetch transport values. */
4518 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
4519 params.spp_pathmtu = trans->pathmtu;
4520 params.spp_pathmaxrxt = trans->pathmaxrxt;
4521 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
4522
4523 /*draft-11 doesn't say what to return in spp_flags*/
4524 params.spp_flags = trans->param_flags;
4525 } else if (asoc) {
4526 /* Fetch association values. */
4527 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
4528 params.spp_pathmtu = asoc->pathmtu;
4529 params.spp_pathmaxrxt = asoc->pathmaxrxt;
4530 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
4531
4532 /*draft-11 doesn't say what to return in spp_flags*/
4533 params.spp_flags = asoc->param_flags;
4534 } else {
4535 /* Fetch socket values. */
4536 params.spp_hbinterval = sp->hbinterval;
4537 params.spp_pathmtu = sp->pathmtu;
4538 params.spp_sackdelay = sp->sackdelay;
4539 params.spp_pathmaxrxt = sp->pathmaxrxt;
4540
4541 /*draft-11 doesn't say what to return in spp_flags*/
4542 params.spp_flags = sp->param_flags;
4543 }
4544
4545 if (copy_to_user(optval, &params, len))
4546 return -EFAULT;
4547
4548 if (put_user(len, optlen))
4549 return -EFAULT;
4550
4551 return 0;
4552 }
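/* A minimal userspace sketch (illustrative; 'sd' and 'assoc_id' are
 * assumed): reading the association-level peer address parameters. An
 * AF_INET INADDR_ANY spp_address selects the association/endpoint values
 * rather than a single transport, as handled above.
 *
 *    struct sctp_paddrparams params;
 *    struct sockaddr_in *any = (struct sockaddr_in *)&params.spp_address;
 *    socklen_t len = sizeof(params);
 *
 *    memset(&params, 0, sizeof(params));
 *    params.spp_assoc_id = assoc_id;
 *    any->sin_family = AF_INET;
 *    getsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &params, &len);
 */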
4553
4554 /*
4555 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4556 *
4557 * This option will affect the way delayed acks are performed. This
4558 * option allows you to get or set the delayed ack time, in
4559 * milliseconds. It also allows changing the delayed ack frequency.
4560 * Changing the frequency to 1 disables the delayed sack algorithm. If
4561 * the assoc_id is 0, then this sets or gets the endpoints default
4562 * values. If the assoc_id field is non-zero, then the set or get
4563 * affects the specified association for the one to many model (the
4564 * assoc_id field is ignored by the one to one model). Note that if
4565 * sack_delay or sack_freq are 0 when setting this option, then the
4566 * current values will remain unchanged.
4567 *
4568 * struct sctp_sack_info {
4569 * sctp_assoc_t sack_assoc_id;
4570 * uint32_t sack_delay;
4571 * uint32_t sack_freq;
4572 * };
4573 *
4574 * sack_assoc_id - This parameter, indicates which association the user
4575 * is performing an action upon. Note that if this field's value is
4576 * zero then the endpoint's default value is changed (affecting future
4577 * associations only).
4578 *
4579 * sack_delay - This parameter contains the number of milliseconds that
4580 * the user is requesting the delayed ACK timer be set to. Note that
4581 * this value is defined in the standard to be between 200 and 500
4582 * milliseconds.
4583 *
4584 * sack_freq - This parameter contains the number of packets that must
4585 * be received before a sack is sent without waiting for the delay
4586 * timer to expire. The default value for this is 2, setting this
4587 * value to 1 will disable the delayed sack algorithm.
4588 */
4589 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4590 char __user *optval,
4591 int __user *optlen)
4592 {
4593 struct sctp_sack_info params;
4594 struct sctp_association *asoc = NULL;
4595 struct sctp_sock *sp = sctp_sk(sk);
4596
4597 if (len >= sizeof(struct sctp_sack_info)) {
4598 len = sizeof(struct sctp_sack_info);
4599
4600 if (copy_from_user(&params, optval, len))
4601 return -EFAULT;
4602 } else if (len == sizeof(struct sctp_assoc_value)) {
4603 pr_warn_ratelimited(DEPRECATED
4604 "%s (pid %d) "
4605 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
4606 "Use struct sctp_sack_info instead\n",
4607 current->comm, task_pid_nr(current));
4608 if (copy_from_user(&params, optval, len))
4609 return -EFAULT;
4610 } else
4611 return -EINVAL;
4612
4613 /* Get association, if sack_assoc_id != 0 and the socket is a one
4614 * to many style socket, and an association was not found, then
4615 * the id was invalid.
4616 */
4617 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4618 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4619 return -EINVAL;
4620
4621 if (asoc) {
4622 /* Fetch association values. */
4623 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4624 params.sack_delay = jiffies_to_msecs(
4625 asoc->sackdelay);
4626 params.sack_freq = asoc->sackfreq;
4627
4628 } else {
4629 params.sack_delay = 0;
4630 params.sack_freq = 1;
4631 }
4632 } else {
4633 /* Fetch socket values. */
4634 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4635 params.sack_delay = sp->sackdelay;
4636 params.sack_freq = sp->sackfreq;
4637 } else {
4638 params.sack_delay = 0;
4639 params.sack_freq = 1;
4640 }
4641 }
4642
4643 if (copy_to_user(optval, &params, len))
4644 return -EFAULT;
4645
4646 if (put_user(len, optlen))
4647 return -EFAULT;
4648
4649 return 0;
4650 }
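/* Userspace sketch (illustrative only, assumed descriptor 'sd'): reading
 * the endpoint's delayed SACK settings; a sack_assoc_id of 0 selects the
 * endpoint defaults as described above.
 *
 *    struct sctp_sack_info si;
 *    socklen_t len = sizeof(si);
 *
 *    memset(&si, 0, sizeof(si));
 *    if (getsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, &len) == 0)
 *        printf("delay %ums, freq %u\n", si.sack_delay, si.sack_freq);
 */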
4651
4652 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
4653 *
4654 * Applications can specify protocol parameters for the default association
4655 * initialization. The option name argument to setsockopt() and getsockopt()
4656 * is SCTP_INITMSG.
4657 *
4658 * Setting initialization parameters is effective only on an unconnected
4659 * socket (for UDP-style sockets only future associations are affected
4660 * by the change). With TCP-style sockets, this option is inherited by
4661 * sockets derived from a listener socket.
4662 */
4663 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
4664 {
4665 if (len < sizeof(struct sctp_initmsg))
4666 return -EINVAL;
4667 len = sizeof(struct sctp_initmsg);
4668 if (put_user(len, optlen))
4669 return -EFAULT;
4670 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
4671 return -EFAULT;
4672 return 0;
4673 }
4674
4675
4676 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4677 char __user *optval, int __user *optlen)
4678 {
4679 struct sctp_association *asoc;
4680 int cnt = 0;
4681 struct sctp_getaddrs getaddrs;
4682 struct sctp_transport *from;
4683 void __user *to;
4684 union sctp_addr temp;
4685 struct sctp_sock *sp = sctp_sk(sk);
4686 int addrlen;
4687 size_t space_left;
4688 int bytes_copied;
4689
4690 if (len < sizeof(struct sctp_getaddrs))
4691 return -EINVAL;
4692
4693 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
4694 return -EFAULT;
4695
4696 /* For UDP-style sockets, id specifies the association to query. */
4697 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4698 if (!asoc)
4699 return -EINVAL;
4700
4701 to = optval + offsetof(struct sctp_getaddrs, addrs);
4702 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4703
4704 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4705 transports) {
4706 memcpy(&temp, &from->ipaddr, sizeof(temp));
4707 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4708 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4709 if (space_left < addrlen)
4710 return -ENOMEM;
4711 if (copy_to_user(to, &temp, addrlen))
4712 return -EFAULT;
4713 to += addrlen;
4714 cnt++;
4715 space_left -= addrlen;
4716 }
4717
4718 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
4719 return -EFAULT;
4720 bytes_copied = ((char __user *)to) - optval;
4721 if (put_user(bytes_copied, optlen))
4722 return -EFAULT;
4723
4724 return 0;
4725 }
4726
4727 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4728 size_t space_left, int *bytes_copied)
4729 {
4730 struct sctp_sockaddr_entry *addr;
4731 union sctp_addr temp;
4732 int cnt = 0;
4733 int addrlen;
4734 struct net *net = sock_net(sk);
4735
4736 rcu_read_lock();
4737 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
4738 if (!addr->valid)
4739 continue;
4740
4741 if ((PF_INET == sk->sk_family) &&
4742 (AF_INET6 == addr->a.sa.sa_family))
4743 continue;
4744 if ((PF_INET6 == sk->sk_family) &&
4745 inet_v6_ipv6only(sk) &&
4746 (AF_INET == addr->a.sa.sa_family))
4747 continue;
4748 memcpy(&temp, &addr->a, sizeof(temp));
4749 if (!temp.v4.sin_port)
4750 temp.v4.sin_port = htons(port);
4751
4752 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
4753 &temp);
4754 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4755 if (space_left < addrlen) {
4756 cnt = -ENOMEM;
4757 break;
4758 }
4759 memcpy(to, &temp, addrlen);
4760
4761 to += addrlen;
4762 cnt++;
4763 space_left -= addrlen;
4764 *bytes_copied += addrlen;
4765 }
4766 rcu_read_unlock();
4767
4768 return cnt;
4769 }
4770
4771
4772 static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4773 char __user *optval, int __user *optlen)
4774 {
4775 struct sctp_bind_addr *bp;
4776 struct sctp_association *asoc;
4777 int cnt = 0;
4778 struct sctp_getaddrs getaddrs;
4779 struct sctp_sockaddr_entry *addr;
4780 void __user *to;
4781 union sctp_addr temp;
4782 struct sctp_sock *sp = sctp_sk(sk);
4783 int addrlen;
4784 int err = 0;
4785 size_t space_left;
4786 int bytes_copied = 0;
4787 void *addrs;
4788 void *buf;
4789
4790 if (len < sizeof(struct sctp_getaddrs))
4791 return -EINVAL;
4792
4793 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
4794 return -EFAULT;
4795
4796 /*
4797 * For UDP-style sockets, id specifies the association to query.
4798 * If the id field is set to the value '0' then the locally bound
4799 * addresses are returned without regard to any particular
4800 * association.
4801 */
4802 if (0 == getaddrs.assoc_id) {
4803 bp = &sctp_sk(sk)->ep->base.bind_addr;
4804 } else {
4805 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4806 if (!asoc)
4807 return -EINVAL;
4808 bp = &asoc->base.bind_addr;
4809 }
4810
4811 to = optval + offsetof(struct sctp_getaddrs, addrs);
4812 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4813
4814 addrs = kmalloc(space_left, GFP_KERNEL);
4815 if (!addrs)
4816 return -ENOMEM;
4817
4818 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4819 * addresses from the global local address list.
4820 */
4821 if (sctp_list_single_entry(&bp->address_list)) {
4822 addr = list_entry(bp->address_list.next,
4823 struct sctp_sockaddr_entry, list);
4824 if (sctp_is_any(sk, &addr->a)) {
4825 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
4826 space_left, &bytes_copied);
4827 if (cnt < 0) {
4828 err = cnt;
4829 goto out;
4830 }
4831 goto copy_getaddrs;
4832 }
4833 }
4834
4835 buf = addrs;
4836 /* Protection on the bound address list is not needed since
4837 * in the socket option context we hold a socket lock and
4838 * thus the bound address list can't change.
4839 */
4840 list_for_each_entry(addr, &bp->address_list, list) {
4841 memcpy(&temp, &addr->a, sizeof(temp));
4842 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
4843 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
4844 if (space_left < addrlen) {
4845 err = -ENOMEM; /*fixme: right error?*/
4846 goto out;
4847 }
4848 memcpy(buf, &temp, addrlen);
4849 buf += addrlen;
4850 bytes_copied += addrlen;
4851 cnt++;
4852 space_left -= addrlen;
4853 }
4854
4855 copy_getaddrs:
4856 if (copy_to_user(to, addrs, bytes_copied)) {
4857 err = -EFAULT;
4858 goto out;
4859 }
4860 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
4861 err = -EFAULT;
4862 goto out;
4863 }
4864 if (put_user(bytes_copied, optlen))
4865 err = -EFAULT;
4866 out:
4867 kfree(addrs);
4868 return err;
4869 }
4870
4871 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
4872 *
4873 * Requests that the local SCTP stack use the enclosed peer address as
4874 * the association primary. The enclosed address must be one of the
4875 * association peer's addresses.
4876 */
4877 static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
4878 char __user *optval, int __user *optlen)
4879 {
4880 struct sctp_prim prim;
4881 struct sctp_association *asoc;
4882 struct sctp_sock *sp = sctp_sk(sk);
4883
4884 if (len < sizeof(struct sctp_prim))
4885 return -EINVAL;
4886
4887 len = sizeof(struct sctp_prim);
4888
4889 if (copy_from_user(&prim, optval, len))
4890 return -EFAULT;
4891
4892 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
4893 if (!asoc)
4894 return -EINVAL;
4895
4896 if (!asoc->peer.primary_path)
4897 return -ENOTCONN;
4898
4899 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
4900 asoc->peer.primary_path->af_specific->sockaddr_len);
4901
4902 sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
4903 (union sctp_addr *)&prim.ssp_addr);
4904
4905 if (put_user(len, optlen))
4906 return -EFAULT;
4907 if (copy_to_user(optval, &prim, len))
4908 return -EFAULT;
4909
4910 return 0;
4911 }
4912
4913 /*
4914 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
4915 *
4916 * Requests that the local endpoint set the specified Adaptation Layer
4917 * Indication parameter for all future INIT and INIT-ACK exchanges.
4918 */
4919 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
4920 char __user *optval, int __user *optlen)
4921 {
4922 struct sctp_setadaptation adaptation;
4923
4924 if (len < sizeof(struct sctp_setadaptation))
4925 return -EINVAL;
4926
4927 len = sizeof(struct sctp_setadaptation);
4928
4929 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
4930
4931 if (put_user(len, optlen))
4932 return -EFAULT;
4933 if (copy_to_user(optval, &adaptation, len))
4934 return -EFAULT;
4935
4936 return 0;
4937 }
4938
4939 /*
4940 *
4941 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
4942 *
4943 * Applications that wish to use the sendto() system call may wish to
4944 * specify a default set of parameters that would normally be supplied
4945 * through the inclusion of ancillary data. This socket option allows
4946 * such an application to set the default sctp_sndrcvinfo structure.
4947
4948
4949 * The application that wishes to use this socket option simply passes
4950 * in to this call the sctp_sndrcvinfo structure defined in Section
4951 * 5.2.2). The input parameters accepted by this call include
4952 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
4953 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
4954 * to this call if the caller is using the UDP model.
4955 *
4956 * For getsockopt, it gets the default sctp_sndrcvinfo structure.
4957 */
4958 static int sctp_getsockopt_default_send_param(struct sock *sk,
4959 int len, char __user *optval,
4960 int __user *optlen)
4961 {
4962 struct sctp_sndrcvinfo info;
4963 struct sctp_association *asoc;
4964 struct sctp_sock *sp = sctp_sk(sk);
4965
4966 if (len < sizeof(struct sctp_sndrcvinfo))
4967 return -EINVAL;
4968
4969 len = sizeof(struct sctp_sndrcvinfo);
4970
4971 if (copy_from_user(&info, optval, len))
4972 return -EFAULT;
4973
4974 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
4975 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
4976 return -EINVAL;
4977
4978 if (asoc) {
4979 info.sinfo_stream = asoc->default_stream;
4980 info.sinfo_flags = asoc->default_flags;
4981 info.sinfo_ppid = asoc->default_ppid;
4982 info.sinfo_context = asoc->default_context;
4983 info.sinfo_timetolive = asoc->default_timetolive;
4984 } else {
4985 info.sinfo_stream = sp->default_stream;
4986 info.sinfo_flags = sp->default_flags;
4987 info.sinfo_ppid = sp->default_ppid;
4988 info.sinfo_context = sp->default_context;
4989 info.sinfo_timetolive = sp->default_timetolive;
4990 }
4991
4992 if (put_user(len, optlen))
4993 return -EFAULT;
4994 if (copy_to_user(optval, &info, len))
4995 return -EFAULT;
4996
4997 return 0;
4998 }
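/* A minimal userspace sketch (illustrative; 'sd' and 'assoc_id' are
 * assumed): retrieving the defaults that apply when no SCTP_SNDRCV
 * ancillary data is supplied on send. The assoc_id is only meaningful on
 * one-to-many sockets.
 *
 *    struct sctp_sndrcvinfo info;
 *    socklen_t len = sizeof(info);
 *
 *    memset(&info, 0, sizeof(info));
 *    info.sinfo_assoc_id = assoc_id;
 *    getsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM, &info, &len);
 */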
4999
5000 /*
5001 *
5002 * 7.1.5 SCTP_NODELAY
5003 *
5004 * Turn on/off any Nagle-like algorithm. This means that packets are
5005 * generally sent as soon as possible and no unnecessary delays are
5006 * introduced, at the cost of more packets in the network. Expects an
5007 * integer boolean flag.
5008 */
5009
5010 static int sctp_getsockopt_nodelay(struct sock *sk, int len,
5011 char __user *optval, int __user *optlen)
5012 {
5013 int val;
5014
5015 if (len < sizeof(int))
5016 return -EINVAL;
5017
5018 len = sizeof(int);
5019 val = (sctp_sk(sk)->nodelay == 1);
5020 if (put_user(len, optlen))
5021 return -EFAULT;
5022 if (copy_to_user(optval, &val, len))
5023 return -EFAULT;
5024 return 0;
5025 }
5026
5027 /*
5028 *
5029 * 7.1.1 SCTP_RTOINFO
5030 *
5031 * The protocol parameters used to initialize and bound retransmission
5032 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
5033 * and modify these parameters.
5034 * All parameters are time values, in milliseconds. A value of 0, when
5035 * modifying the parameters, indicates that the current value should not
5036 * be changed.
5037 *
5038 */
5039 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
5040 char __user *optval,
5041 int __user *optlen) {
5042 struct sctp_rtoinfo rtoinfo;
5043 struct sctp_association *asoc;
5044
5045 if (len < sizeof (struct sctp_rtoinfo))
5046 return -EINVAL;
5047
5048 len = sizeof(struct sctp_rtoinfo);
5049
5050 if (copy_from_user(&rtoinfo, optval, len))
5051 return -EFAULT;
5052
5053 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
5054
5055 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
5056 return -EINVAL;
5057
5058 /* Values corresponding to the specific association. */
5059 if (asoc) {
5060 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
5061 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
5062 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
5063 } else {
5064 /* Values corresponding to the endpoint. */
5065 struct sctp_sock *sp = sctp_sk(sk);
5066
5067 rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
5068 rtoinfo.srto_max = sp->rtoinfo.srto_max;
5069 rtoinfo.srto_min = sp->rtoinfo.srto_min;
5070 }
5071
5072 if (put_user(len, optlen))
5073 return -EFAULT;
5074
5075 if (copy_to_user(optval, &rtoinfo, len))
5076 return -EFAULT;
5077
5078 return 0;
5079 }
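/* Illustrative userspace sketch (assumed socket 'sd', error handling
 * omitted): reading the endpoint RTO bounds and then halving the initial
 * RTO. All values are in milliseconds, and a value of 0 on set would
 * leave the corresponding parameter unchanged.
 *
 *    struct sctp_rtoinfo rto;
 *    socklen_t len = sizeof(rto);
 *
 *    memset(&rto, 0, sizeof(rto));
 *    getsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len);
 *    rto.srto_initial /= 2;
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 */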
5080
5081 /*
5082 *
5083 * 7.1.2 SCTP_ASSOCINFO
5084 *
5085 * This option is used to tune the maximum retransmission attempts
5086 * of the association.
5087 * Returns an error if the new association retransmission value is
5088 * greater than the sum of the retransmission value of the peer.
5089 * See [SCTP] for more information.
5090 *
5091 */
5092 static int sctp_getsockopt_associnfo(struct sock *sk, int len,
5093 char __user *optval,
5094 int __user *optlen)
5095 {
5096
5097 struct sctp_assocparams assocparams;
5098 struct sctp_association *asoc;
5099 struct list_head *pos;
5100 int cnt = 0;
5101
5102 if (len < sizeof (struct sctp_assocparams))
5103 return -EINVAL;
5104
5105 len = sizeof(struct sctp_assocparams);
5106
5107 if (copy_from_user(&assocparams, optval, len))
5108 return -EFAULT;
5109
5110 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
5111
5112 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
5113 return -EINVAL;
5114
5115 /* Values corresponding to the specific association */
5116 if (asoc) {
5117 assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
5118 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
5119 assocparams.sasoc_local_rwnd = asoc->a_rwnd;
5120 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
5121
5122 list_for_each(pos, &asoc->peer.transport_addr_list) {
5123 cnt++;
5124 }
5125
5126 assocparams.sasoc_number_peer_destinations = cnt;
5127 } else {
5128 /* Values corresponding to the endpoint */
5129 struct sctp_sock *sp = sctp_sk(sk);
5130
5131 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
5132 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
5133 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
5134 assocparams.sasoc_cookie_life =
5135 sp->assocparams.sasoc_cookie_life;
5136 assocparams.sasoc_number_peer_destinations =
5137 sp->assocparams.
5138 sasoc_number_peer_destinations;
5139 }
5140
5141 if (put_user(len, optlen))
5142 return -EFAULT;
5143
5144 if (copy_to_user(optval, &assocparams, len))
5145 return -EFAULT;
5146
5147 return 0;
5148 }
5149
5150 /*
5151 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
5152 *
5153 * This socket option is a boolean flag which turns on or off mapped V4
5154 * addresses. If this option is turned on and the socket is type
5155 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
5156 * If this option is turned off, then no mapping will be done of V4
5157 * addresses and a user will receive both PF_INET6 and PF_INET type
5158 * addresses on the socket.
5159 */
5160 static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
5161 char __user *optval, int __user *optlen)
5162 {
5163 int val;
5164 struct sctp_sock *sp = sctp_sk(sk);
5165
5166 if (len < sizeof(int))
5167 return -EINVAL;
5168
5169 len = sizeof(int);
5170 val = sp->v4mapped;
5171 if (put_user(len, optlen))
5172 return -EFAULT;
5173 if (copy_to_user(optval, &val, len))
5174 return -EFAULT;
5175
5176 return 0;
5177 }
5178
5179 /*
5180 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
5181 * (chapter and verse is quoted at sctp_setsockopt_context())
5182 */
5183 static int sctp_getsockopt_context(struct sock *sk, int len,
5184 char __user *optval, int __user *optlen)
5185 {
5186 struct sctp_assoc_value params;
5187 struct sctp_sock *sp;
5188 struct sctp_association *asoc;
5189
5190 if (len < sizeof(struct sctp_assoc_value))
5191 return -EINVAL;
5192
5193 len = sizeof(struct sctp_assoc_value);
5194
5195 if (copy_from_user(&params, optval, len))
5196 return -EFAULT;
5197
5198 sp = sctp_sk(sk);
5199
5200 if (params.assoc_id != 0) {
5201 asoc = sctp_id2assoc(sk, params.assoc_id);
5202 if (!asoc)
5203 return -EINVAL;
5204 params.assoc_value = asoc->default_rcv_context;
5205 } else {
5206 params.assoc_value = sp->default_rcv_context;
5207 }
5208
5209 if (put_user(len, optlen))
5210 return -EFAULT;
5211 if (copy_to_user(optval, &params, len))
5212 return -EFAULT;
5213
5214 return 0;
5215 }
5216
5217 /*
5218 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
5219 * This option will get or set the maximum size to put in any outgoing
5220 * SCTP DATA chunk. If a message is larger than this size it will be
5221 * fragmented by SCTP into the specified size. Note that the underlying
5222 * SCTP implementation may fragment into smaller sized chunks when the
5223 * PMTU of the underlying association is smaller than the value set by
5224 * the user. The default value for this option is '0' which indicates
5225 * the user is NOT limiting fragmentation and only the PMTU will affect
5226 * SCTP's choice of DATA chunk size. Note also that values set larger
5227 * than the maximum size of an IP datagram will effectively let SCTP
5228 * control fragmentation (i.e. the same as setting this option to 0).
5229 *
5230 * The following structure is used to access and modify this parameter:
5231 *
5232 * struct sctp_assoc_value {
5233 * sctp_assoc_t assoc_id;
5234 * uint32_t assoc_value;
5235 * };
5236 *
5237 * assoc_id: This parameter is ignored for one-to-one style sockets.
5238 * For one-to-many style sockets this parameter indicates which
5239 * association the user is performing an action upon. Note that if
5240 * this field's value is zero then the endpoint's default value is
5241 * changed (affecting future associations only).
5242 * assoc_value: This parameter specifies the maximum size in bytes.
5243 */
5244 static int sctp_getsockopt_maxseg(struct sock *sk, int len,
5245 char __user *optval, int __user *optlen)
5246 {
5247 struct sctp_assoc_value params;
5248 struct sctp_association *asoc;
5249
5250 if (len == sizeof(int)) {
5251 pr_warn_ratelimited(DEPRECATED
5252 "%s (pid %d) "
5253 "Use of int in maxseg socket option.\n"
5254 "Use struct sctp_assoc_value instead\n",
5255 current->comm, task_pid_nr(current));
5256 params.assoc_id = 0;
5257 } else if (len >= sizeof(struct sctp_assoc_value)) {
5258 len = sizeof(struct sctp_assoc_value);
5259 if (copy_from_user(&params, optval, sizeof(params)))
5260 return -EFAULT;
5261 } else
5262 return -EINVAL;
5263
5264 asoc = sctp_id2assoc(sk, params.assoc_id);
5265 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
5266 return -EINVAL;
5267
5268 if (asoc)
5269 params.assoc_value = asoc->frag_point;
5270 else
5271 params.assoc_value = sctp_sk(sk)->user_frag;
5272
5273 if (put_user(len, optlen))
5274 return -EFAULT;
5275 if (len == sizeof(int)) {
5276 if (copy_to_user(optval, &params.assoc_value, len))
5277 return -EFAULT;
5278 } else {
5279 if (copy_to_user(optval, &params, len))
5280 return -EFAULT;
5281 }
5282
5283 return 0;
5284 }
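/* Userspace sketch (illustrative; 'sd' and the 1400-byte cap are assumed
 * values): capping outgoing DATA chunks for the endpoint default
 * (assoc_id 0) using the sctp_assoc_value form described above.
 *
 *    struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1400 };
 *
 *    setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG, &av, sizeof(av));
 */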
5285
5286 /*
5287 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
5288 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
5289 */
5290 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
5291 char __user *optval, int __user *optlen)
5292 {
5293 int val;
5294
5295 if (len < sizeof(int))
5296 return -EINVAL;
5297
5298 len = sizeof(int);
5299
5300 val = sctp_sk(sk)->frag_interleave;
5301 if (put_user(len, optlen))
5302 return -EFAULT;
5303 if (copy_to_user(optval, &val, len))
5304 return -EFAULT;
5305
5306 return 0;
5307 }
5308
5309 /*
5310 * 7.1.25. Set or Get the sctp partial delivery point
5311 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
5312 */
5313 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
5314 char __user *optval,
5315 int __user *optlen)
5316 {
5317 u32 val;
5318
5319 if (len < sizeof(u32))
5320 return -EINVAL;
5321
5322 len = sizeof(u32);
5323
5324 val = sctp_sk(sk)->pd_point;
5325 if (put_user(len, optlen))
5326 return -EFAULT;
5327 if (copy_to_user(optval, &val, len))
5328 return -EFAULT;
5329
5330 return 0;
5331 }
5332
5333 /*
5334 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
5335 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
5336 */
5337 static int sctp_getsockopt_maxburst(struct sock *sk, int len,
5338 char __user *optval,
5339 int __user *optlen)
5340 {
5341 struct sctp_assoc_value params;
5342 struct sctp_sock *sp;
5343 struct sctp_association *asoc;
5344
5345 if (len == sizeof(int)) {
5346 pr_warn_ratelimited(DEPRECATED
5347 "%s (pid %d) "
5348 "Use of int in max_burst socket option.\n"
5349 "Use struct sctp_assoc_value instead\n",
5350 current->comm, task_pid_nr(current));
5351 params.assoc_id = 0;
5352 } else if (len >= sizeof(struct sctp_assoc_value)) {
5353 len = sizeof(struct sctp_assoc_value);
5354 if (copy_from_user(&params, optval, len))
5355 return -EFAULT;
5356 } else
5357 return -EINVAL;
5358
5359 sp = sctp_sk(sk);
5360
5361 if (params.assoc_id != 0) {
5362 asoc = sctp_id2assoc(sk, params.assoc_id);
5363 if (!asoc)
5364 return -EINVAL;
5365 params.assoc_value = asoc->max_burst;
5366 } else
5367 params.assoc_value = sp->max_burst;
5368
5369 if (len == sizeof(int)) {
5370 if (copy_to_user(optval, &params.assoc_value, len))
5371 return -EFAULT;
5372 } else {
5373 if (copy_to_user(optval, &params, len))
5374 return -EFAULT;
5375 }
5376
5377 return 0;
5378
5379 }
5380
5381 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5382 char __user *optval, int __user *optlen)
5383 {
5384 struct net *net = sock_net(sk);
5385 struct sctp_hmacalgo __user *p = (void __user *)optval;
5386 struct sctp_hmac_algo_param *hmacs;
5387 __u16 data_len = 0;
5388 u32 num_idents;
5389
5390 if (!net->sctp.auth_enable)
5391 return -EACCES;
5392
5393 hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
5394 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
5395
5396 if (len < sizeof(struct sctp_hmacalgo) + data_len)
5397 return -EINVAL;
5398
5399 len = sizeof(struct sctp_hmacalgo) + data_len;
5400 num_idents = data_len / sizeof(u16);
5401
5402 if (put_user(len, optlen))
5403 return -EFAULT;
5404 if (put_user(num_idents, &p->shmac_num_idents))
5405 return -EFAULT;
5406 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
5407 return -EFAULT;
5408 return 0;
5409 }
5410
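/* Example (user-space sketch, <stdlib.h> assumed for malloc/free): reading
 * the HMAC identifier list.  The room for eight identifiers is an arbitrary
 * assumption; the kernel reports the exact length back through optlen, and
 * the caller sees EACCES when net.sctp.auth_enable is off.
 *
 *	size_t size = sizeof(struct sctp_hmacalgo) + 8 * sizeof(uint16_t);
 *	struct sctp_hmacalgo *algo = malloc(size);
 *	socklen_t optlen = size;
 *	uint32_t i;
 *
 *	if (algo &&
 *	    getsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, &optlen) == 0)
 *		for (i = 0; i < algo->shmac_num_idents; i++)
 *			printf("hmac id %u\n", algo->shmac_idents[i]);
 *	free(algo);
 */
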
5411 static int sctp_getsockopt_active_key(struct sock *sk, int len,
5412 char __user *optval, int __user *optlen)
5413 {
5414 struct net *net = sock_net(sk);
5415 struct sctp_authkeyid val;
5416 struct sctp_association *asoc;
5417
5418 if (!net->sctp.auth_enable)
5419 return -EACCES;
5420
5421 if (len < sizeof(struct sctp_authkeyid))
5422 return -EINVAL;
5423 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
5424 return -EFAULT;
5425
5426 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
5427 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
5428 return -EINVAL;
5429
5430 if (asoc)
5431 val.scact_keynumber = asoc->active_key_id;
5432 else
5433 val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
5434
5435 len = sizeof(struct sctp_authkeyid);
5436 if (put_user(len, optlen))
5437 return -EFAULT;
5438 if (copy_to_user(optval, &val, len))
5439 return -EFAULT;
5440
5441 return 0;
5442 }
5443
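/* Example (user-space sketch): querying the active authentication key
 * number.  scact_assoc_id 0 reads the endpoint default, as in the code
 * above; a specific association id returns that association's key.
 *
 *	struct sctp_authkeyid akey;
 *	socklen_t optlen = sizeof(akey);
 *
 *	memset(&akey, 0, sizeof(akey));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *		       &akey, &optlen) == 0)
 *		printf("active key number: %u\n", akey.scact_keynumber);
 */
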
5444 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5445 char __user *optval, int __user *optlen)
5446 {
5447 struct net *net = sock_net(sk);
5448 struct sctp_authchunks __user *p = (void __user *)optval;
5449 struct sctp_authchunks val;
5450 struct sctp_association *asoc;
5451 struct sctp_chunks_param *ch;
5452 u32 num_chunks = 0;
5453 char __user *to;
5454
5455 if (!net->sctp.auth_enable)
5456 return -EACCES;
5457
5458 if (len < sizeof(struct sctp_authchunks))
5459 return -EINVAL;
5460
5461 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
5462 return -EFAULT;
5463
5464 to = p->gauth_chunks;
5465 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
5466 if (!asoc)
5467 return -EINVAL;
5468
5469 ch = asoc->peer.peer_chunks;
5470 if (!ch)
5471 goto num;
5472
5473 /* See if the user provided enough room for all the data */
5474 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
5475 if (len < sizeof(struct sctp_authchunks) + num_chunks)
5476 return -EINVAL;
5477
5478 if (copy_to_user(to, ch->chunks, num_chunks))
5479 return -EFAULT;
5480 num:
5481 len = sizeof(struct sctp_authchunks) + num_chunks;
5482 if (put_user(len, optlen))
5483 return -EFAULT;
5484 if (put_user(num_chunks, &p->gauth_number_of_chunks))
5485 return -EFAULT;
5486 return 0;
5487 }
5488
5489 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5490 char __user *optval, int __user *optlen)
5491 {
5492 struct net *net = sock_net(sk);
5493 struct sctp_authchunks __user *p = (void __user *)optval;
5494 struct sctp_authchunks val;
5495 struct sctp_association *asoc;
5496 struct sctp_chunks_param *ch;
5497 u32 num_chunks = 0;
5498 char __user *to;
5499
5500 if (!net->sctp.auth_enable)
5501 return -EACCES;
5502
5503 if (len < sizeof(struct sctp_authchunks))
5504 return -EINVAL;
5505
5506 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
5507 return -EFAULT;
5508
5509 to = p->gauth_chunks;
5510 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
5511 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
5512 return -EINVAL;
5513
5514 if (asoc)
5515 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
5516 else
5517 ch = sctp_sk(sk)->ep->auth_chunk_list;
5518
5519 if (!ch)
5520 goto num;
5521
5522 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
5523 if (len < sizeof(struct sctp_authchunks) + num_chunks)
5524 return -EINVAL;
5525
5526 if (copy_to_user(to, ch->chunks, num_chunks))
5527 return -EFAULT;
5528 num:
5529 len = sizeof(struct sctp_authchunks) + num_chunks;
5530 if (put_user(len, optlen))
5531 return -EFAULT;
5532 if (put_user(num_chunks, &p->gauth_number_of_chunks))
5533 return -EFAULT;
5534
5535 return 0;
5536 }
5537
5538 /*
5539 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
5540 * This option gets the current number of associations that are attached
5541 * to a one-to-many style socket. The option value is a uint32_t.
5542 */
5543 static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
5544 char __user *optval, int __user *optlen)
5545 {
5546 struct sctp_sock *sp = sctp_sk(sk);
5547 struct sctp_association *asoc;
5548 u32 val = 0;
5549
5550 if (sctp_style(sk, TCP))
5551 return -EOPNOTSUPP;
5552
5553 if (len < sizeof(u32))
5554 return -EINVAL;
5555
5556 len = sizeof(u32);
5557
5558 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5559 val++;
5560 }
5561
5562 if (put_user(len, optlen))
5563 return -EFAULT;
5564 if (copy_to_user(optval, &val, len))
5565 return -EFAULT;
5566
5567 return 0;
5568 }
5569
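/* Example (user-space sketch): the association count is read as a plain
 * uint32_t; "fd" is assumed to be a one-to-many (SOCK_SEQPACKET) socket,
 * since TCP-style sockets get EOPNOTSUPP here.
 *
 *	uint32_t assoc_count = 0;
 *	socklen_t optlen = sizeof(assoc_count);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &assoc_count, &optlen) == 0)
 *		printf("%u associations\n", assoc_count);
 */
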
5570 /*
5571 * 8.1.23 SCTP_AUTO_ASCONF
5572 * See the corresponding setsockopt entry as description
5573 */
5574 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
5575 char __user *optval, int __user *optlen)
5576 {
5577 int val = 0;
5578
5579 if (len < sizeof(int))
5580 return -EINVAL;
5581
5582 len = sizeof(int);
5583 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
5584 val = 1;
5585 if (put_user(len, optlen))
5586 return -EFAULT;
5587 if (copy_to_user(optval, &val, len))
5588 return -EFAULT;
5589 return 0;
5590 }
5591
5592 /*
5593 * 8.2.6. Get the Current Identifiers of Associations
5594 * (SCTP_GET_ASSOC_ID_LIST)
5595 *
5596 * This option gets the current list of SCTP association identifiers of
5597 * the SCTP associations handled by a one-to-many style socket.
5598 */
5599 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
5600 char __user *optval, int __user *optlen)
5601 {
5602 struct sctp_sock *sp = sctp_sk(sk);
5603 struct sctp_association *asoc;
5604 struct sctp_assoc_ids *ids;
5605 u32 num = 0;
5606
5607 if (sctp_style(sk, TCP))
5608 return -EOPNOTSUPP;
5609
5610 if (len < sizeof(struct sctp_assoc_ids))
5611 return -EINVAL;
5612
5613 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5614 num++;
5615 }
5616
5617 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
5618 return -EINVAL;
5619
5620 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
5621
5622 ids = kmalloc(len, GFP_KERNEL);
5623 if (unlikely(!ids))
5624 return -ENOMEM;
5625
5626 ids->gaids_number_of_ids = num;
5627 num = 0;
5628 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5629 ids->gaids_assoc_id[num++] = asoc->assoc_id;
5630 }
5631
5632 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
5633 kfree(ids);
5634 return -EFAULT;
5635 }
5636
5637 kfree(ids);
5638 return 0;
5639 }
5640
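/* Example (user-space sketch, <stdlib.h> assumed for malloc/free): since
 * the list length is not known up front, the usual pattern is to size the
 * buffer from SCTP_GET_ASSOC_NUMBER first; if associations were added in
 * between, the call fails with EINVAL and can simply be retried.
 *
 *	uint32_t n = 0;
 *	socklen_t optlen = sizeof(n);
 *	struct sctp_assoc_ids *ids;
 *	size_t size;
 *	uint32_t i;
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &optlen);
 *	size = sizeof(*ids) + n * sizeof(sctp_assoc_t);
 *	ids = malloc(size);
 *	optlen = size;
 *	if (ids &&
 *	    getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *		       ids, &optlen) == 0)
 *		for (i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);
 *	free(ids);
 */
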
5641 /*
5642 * SCTP_PEER_ADDR_THLDS
5643 *
5644 * This option allows us to fetch the potentially failed threshold for one or all
5645 * transports in an association. See Section 6.1 of:
5646 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
5647 */
5648 static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
5649 char __user *optval,
5650 int len,
5651 int __user *optlen)
5652 {
5653 struct sctp_paddrthlds val;
5654 struct sctp_transport *trans;
5655 struct sctp_association *asoc;
5656
5657 if (len < sizeof(struct sctp_paddrthlds))
5658 return -EINVAL;
5659 len = sizeof(struct sctp_paddrthlds);
5660 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
5661 return -EFAULT;
5662
5663 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
5664 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
5665 if (!asoc)
5666 return -ENOENT;
5667
5668 val.spt_pathpfthld = asoc->pf_retrans;
5669 val.spt_pathmaxrxt = asoc->pathmaxrxt;
5670 } else {
5671 trans = sctp_addr_id2transport(sk, &val.spt_address,
5672 val.spt_assoc_id);
5673 if (!trans)
5674 return -ENOENT;
5675
5676 val.spt_pathmaxrxt = trans->pathmaxrxt;
5677 val.spt_pathpfthld = trans->pf_retrans;
5678 }
5679
5680 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
5681 return -EFAULT;
5682
5683 return 0;
5684 }
5685
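/* Example (user-space sketch, field names per the Linux uapi headers): a
 * zeroed spt_address is the wildcard, so the association-wide thresholds
 * are returned rather than a single transport's; "assoc_id" is assumed to
 * identify an existing association.
 *
 *	struct sctp_paddrthlds thlds;
 *	socklen_t optlen = sizeof(thlds);
 *
 *	memset(&thlds, 0, sizeof(thlds));
 *	thlds.spt_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &thlds, &optlen) == 0)
 *		printf("pf threshold %u, path max rxt %u\n",
 *		       thlds.spt_pathpfthld, thlds.spt_pathmaxrxt);
 */
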
5686 /*
5687 * SCTP_GET_ASSOC_STATS
5688 *
5689 * This option retrieves local per endpoint statistics. It is modeled
5690 * after OpenSolaris' implementation
5691 */
5692 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5693 char __user *optval,
5694 int __user *optlen)
5695 {
5696 struct sctp_assoc_stats sas;
5697 struct sctp_association *asoc = NULL;
5698
5699 /* User must provide at least the assoc id */
5700 if (len < sizeof(sctp_assoc_t))
5701 return -EINVAL;
5702
5703 /* Allow the struct to grow and fill in as much as possible */
5704 len = min_t(size_t, len, sizeof(sas));
5705
5706 if (copy_from_user(&sas, optval, len))
5707 return -EFAULT;
5708
5709 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
5710 if (!asoc)
5711 return -EINVAL;
5712
5713 sas.sas_rtxchunks = asoc->stats.rtxchunks;
5714 sas.sas_gapcnt = asoc->stats.gapcnt;
5715 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
5716 sas.sas_osacks = asoc->stats.osacks;
5717 sas.sas_isacks = asoc->stats.isacks;
5718 sas.sas_octrlchunks = asoc->stats.octrlchunks;
5719 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
5720 sas.sas_oodchunks = asoc->stats.oodchunks;
5721 sas.sas_iodchunks = asoc->stats.iodchunks;
5722 sas.sas_ouodchunks = asoc->stats.ouodchunks;
5723 sas.sas_iuodchunks = asoc->stats.iuodchunks;
5724 sas.sas_idupchunks = asoc->stats.idupchunks;
5725 sas.sas_opackets = asoc->stats.opackets;
5726 sas.sas_ipackets = asoc->stats.ipackets;
5727
5728 /* New high max rto observed, will return 0 if not a single
5729 * RTO update took place. obs_rto_ipaddr will be bogus
5730 * in such a case
5731 */
5732 sas.sas_maxrto = asoc->stats.max_obs_rto;
5733 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
5734 sizeof(struct sockaddr_storage));
5735
5736 /* Mark beginning of a new observation period */
5737 asoc->stats.max_obs_rto = asoc->rto_min;
5738
5739 if (put_user(len, optlen))
5740 return -EFAULT;
5741
5742 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
5743
5744 if (copy_to_user(optval, &sas, len))
5745 return -EFAULT;
5746
5747 return 0;
5748 }
5749
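/* Example (user-space sketch): only sas_assoc_id needs to be set on input;
 * note that reading the stats also restarts the max-observed-RTO tracking,
 * as done above.  "assoc_id" is an assumed existing association id.
 *
 *	struct sctp_assoc_stats stats;
 *	socklen_t optlen = sizeof(stats);
 *
 *	memset(&stats, 0, sizeof(stats));
 *	stats.sas_assoc_id = assoc_id;
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
 *		       &stats, &optlen) == 0)
 *		printf("opackets %llu ipackets %llu max rto %llu\n",
 *		       (unsigned long long)stats.sas_opackets,
 *		       (unsigned long long)stats.sas_ipackets,
 *		       (unsigned long long)stats.sas_maxrto);
 */
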
5750 static int sctp_getsockopt(struct sock *sk, int level, int optname,
5751 char __user *optval, int __user *optlen)
5752 {
5753 int retval = 0;
5754 int len;
5755
5756 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
5757
5758 /* I can hardly begin to describe how wrong this is. This is
5759 * so broken as to be worse than useless. The API draft
5760 * REALLY is NOT helpful here... I am not convinced that the
5761 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
5762 * are at all well-founded.
5763 */
5764 if (level != SOL_SCTP) {
5765 struct sctp_af *af = sctp_sk(sk)->pf->af;
5766
5767 retval = af->getsockopt(sk, level, optname, optval, optlen);
5768 return retval;
5769 }
5770
5771 if (get_user(len, optlen))
5772 return -EFAULT;
5773
5774 lock_sock(sk);
5775
5776 switch (optname) {
5777 case SCTP_STATUS:
5778 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
5779 break;
5780 case SCTP_DISABLE_FRAGMENTS:
5781 retval = sctp_getsockopt_disable_fragments(sk, len, optval,
5782 optlen);
5783 break;
5784 case SCTP_EVENTS:
5785 retval = sctp_getsockopt_events(sk, len, optval, optlen);
5786 break;
5787 case SCTP_AUTOCLOSE:
5788 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
5789 break;
5790 case SCTP_SOCKOPT_PEELOFF:
5791 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
5792 break;
5793 case SCTP_PEER_ADDR_PARAMS:
5794 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
5795 optlen);
5796 break;
5797 case SCTP_DELAYED_SACK:
5798 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
5799 optlen);
5800 break;
5801 case SCTP_INITMSG:
5802 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
5803 break;
5804 case SCTP_GET_PEER_ADDRS:
5805 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
5806 optlen);
5807 break;
5808 case SCTP_GET_LOCAL_ADDRS:
5809 retval = sctp_getsockopt_local_addrs(sk, len, optval,
5810 optlen);
5811 break;
5812 case SCTP_SOCKOPT_CONNECTX3:
5813 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
5814 break;
5815 case SCTP_DEFAULT_SEND_PARAM:
5816 retval = sctp_getsockopt_default_send_param(sk, len,
5817 optval, optlen);
5818 break;
5819 case SCTP_PRIMARY_ADDR:
5820 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
5821 break;
5822 case SCTP_NODELAY:
5823 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
5824 break;
5825 case SCTP_RTOINFO:
5826 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
5827 break;
5828 case SCTP_ASSOCINFO:
5829 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
5830 break;
5831 case SCTP_I_WANT_MAPPED_V4_ADDR:
5832 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
5833 break;
5834 case SCTP_MAXSEG:
5835 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
5836 break;
5837 case SCTP_GET_PEER_ADDR_INFO:
5838 retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
5839 optlen);
5840 break;
5841 case SCTP_ADAPTATION_LAYER:
5842 retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
5843 optlen);
5844 break;
5845 case SCTP_CONTEXT:
5846 retval = sctp_getsockopt_context(sk, len, optval, optlen);
5847 break;
5848 case SCTP_FRAGMENT_INTERLEAVE:
5849 retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
5850 optlen);
5851 break;
5852 case SCTP_PARTIAL_DELIVERY_POINT:
5853 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
5854 optlen);
5855 break;
5856 case SCTP_MAX_BURST:
5857 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
5858 break;
5859 case SCTP_AUTH_KEY:
5860 case SCTP_AUTH_CHUNK:
5861 case SCTP_AUTH_DELETE_KEY:
5862 retval = -EOPNOTSUPP;
5863 break;
5864 case SCTP_HMAC_IDENT:
5865 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
5866 break;
5867 case SCTP_AUTH_ACTIVE_KEY:
5868 retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
5869 break;
5870 case SCTP_PEER_AUTH_CHUNKS:
5871 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
5872 optlen);
5873 break;
5874 case SCTP_LOCAL_AUTH_CHUNKS:
5875 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
5876 optlen);
5877 break;
5878 case SCTP_GET_ASSOC_NUMBER:
5879 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
5880 break;
5881 case SCTP_GET_ASSOC_ID_LIST:
5882 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
5883 break;
5884 case SCTP_AUTO_ASCONF:
5885 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
5886 break;
5887 case SCTP_PEER_ADDR_THLDS:
5888 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
5889 break;
5890 case SCTP_GET_ASSOC_STATS:
5891 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
5892 break;
5893 default:
5894 retval = -ENOPROTOOPT;
5895 break;
5896 }
5897
5898 release_sock(sk);
5899 return retval;
5900 }
5901
5902 static void sctp_hash(struct sock *sk)
5903 {
5904 /* STUB */
5905 }
5906
5907 static void sctp_unhash(struct sock *sk)
5908 {
5909 /* STUB */
5910 }
5911
5912 /* Check if port is acceptable. Possibly find first available port.
5913 *
5914 * The port hash table (contained in the 'global' SCTP protocol storage
5915 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
5916 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
5917 * list (the list number is the port number hashed out, so as you
5918 * would expect from a hash function, all the ports in a given list have
5919 * such a number that hashes out to the same list number; you were
5920 * expecting that, right?); so each list has a set of ports, with a
5921 * link to the socket (struct sock) that uses it, the port number and
5922 * a fastreuse flag (FIXME: NPI ipg).
5923 */
5924 static struct sctp_bind_bucket *sctp_bucket_create(
5925 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
5926
5927 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
5928 {
5929 struct sctp_bind_hashbucket *head; /* hash list */
5930 struct sctp_bind_bucket *pp;
5931 unsigned short snum;
5932 int ret;
5933
5934 snum = ntohs(addr->v4.sin_port);
5935
5936 pr_debug("%s: begins, snum:%d\n", __func__, snum);
5937
5938 local_bh_disable();
5939
5940 if (snum == 0) {
5941 /* Search for an available port. */
5942 int low, high, remaining, index;
5943 unsigned int rover;
5944
5945 inet_get_local_port_range(sock_net(sk), &low, &high);
5946 remaining = (high - low) + 1;
5947 rover = prandom_u32() % remaining + low;
5948
5949 do {
5950 rover++;
5951 if ((rover < low) || (rover > high))
5952 rover = low;
5953 if (inet_is_reserved_local_port(rover))
5954 continue;
5955 index = sctp_phashfn(sock_net(sk), rover);
5956 head = &sctp_port_hashtable[index];
5957 spin_lock(&head->lock);
5958 sctp_for_each_hentry(pp, &head->chain)
5959 if ((pp->port == rover) &&
5960 net_eq(sock_net(sk), pp->net))
5961 goto next;
5962 break;
5963 next:
5964 spin_unlock(&head->lock);
5965 } while (--remaining > 0);
5966
5967 /* Exhausted local port range during search? */
5968 ret = 1;
5969 if (remaining <= 0)
5970 goto fail;
5971
5972 /* OK, here is the one we will use. HEAD (the port
5973 * hash table list entry) is non-NULL and we hold its
5974 * lock.
5975 */
5976 snum = rover;
5977 } else {
5978 /* We are given a specific port number; we verify
5979 * that it is not being used. If it is used, we will
5980 * exhaust the search in the hash list corresponding
5981 * to the port number (snum) - we detect that with the
5982 * port iterator, pp being NULL.
5983 */
5984 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
5985 spin_lock(&head->lock);
5986 sctp_for_each_hentry(pp, &head->chain) {
5987 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
5988 goto pp_found;
5989 }
5990 }
5991 pp = NULL;
5992 goto pp_not_found;
5993 pp_found:
5994 if (!hlist_empty(&pp->owner)) {
5995 /* We had a port hash table hit - there is an
5996 * available port (pp != NULL) and it is being
5997 * used by other socket (pp->owner not empty); that other
5998 * socket is going to be sk2.
5999 */
6000 int reuse = sk->sk_reuse;
6001 struct sock *sk2;
6002
6003 pr_debug("%s: found a possible match\n", __func__);
6004
6005 if (pp->fastreuse && sk->sk_reuse &&
6006 sk->sk_state != SCTP_SS_LISTENING)
6007 goto success;
6008
6009 /* Run through the list of sockets bound to the port
6010 * (pp->port) [via the pointers bind_next and
6011 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
6012 * we get the endpoint they describe and run through
6013 * the endpoint's list of IP (v4 or v6) addresses,
6014 * comparing each of the addresses with the address of
6015 * the socket sk. If we find a match, then that means
6016 * that this port/socket (sk) combination is already
6017 * in an endpoint.
6018 */
6019 sk_for_each_bound(sk2, &pp->owner) {
6020 struct sctp_endpoint *ep2;
6021 ep2 = sctp_sk(sk2)->ep;
6022
6023 if (sk == sk2 ||
6024 (reuse && sk2->sk_reuse &&
6025 sk2->sk_state != SCTP_SS_LISTENING))
6026 continue;
6027
6028 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
6029 sctp_sk(sk2), sctp_sk(sk))) {
6030 ret = (long)sk2;
6031 goto fail_unlock;
6032 }
6033 }
6034
6035 pr_debug("%s: found a match\n", __func__);
6036 }
6037 pp_not_found:
6038 /* If there was a hash table miss, create a new port. */
6039 ret = 1;
6040 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
6041 goto fail_unlock;
6042
6043 /* In either case (hit or miss), make sure fastreuse is 1 only
6044 * if sk->sk_reuse is too (that is, if the caller requested
6045 * SO_REUSEADDR on this socket -sk-).
6046 */
6047 if (hlist_empty(&pp->owner)) {
6048 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
6049 pp->fastreuse = 1;
6050 else
6051 pp->fastreuse = 0;
6052 } else if (pp->fastreuse &&
6053 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
6054 pp->fastreuse = 0;
6055
6056 /* We are set, so fill up all the data in the hash table
6057 * entry, tie the socket list information with the rest of the
6058 * sockets FIXME: Blurry, NPI (ipg).
6059 */
6060 success:
6061 if (!sctp_sk(sk)->bind_hash) {
6062 inet_sk(sk)->inet_num = snum;
6063 sk_add_bind_node(sk, &pp->owner);
6064 sctp_sk(sk)->bind_hash = pp;
6065 }
6066 ret = 0;
6067
6068 fail_unlock:
6069 spin_unlock(&head->lock);
6070
6071 fail:
6072 local_bh_enable();
6073 return ret;
6074 }
6075
6076 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
6077 * port is requested.
6078 */
6079 static int sctp_get_port(struct sock *sk, unsigned short snum)
6080 {
6081 union sctp_addr addr;
6082 struct sctp_af *af = sctp_sk(sk)->pf->af;
6083
6084 /* Set up a dummy address struct from the sk. */
6085 af->from_sk(&addr, sk);
6086 addr.v4.sin_port = htons(snum);
6087
6088 /* Note: sk->sk_num gets filled in if ephemeral port request. */
6089 return !!sctp_get_port_local(sk, &addr);
6090 }
6091
6092 /*
6093 * Move a socket to LISTENING state.
6094 */
6095 static int sctp_listen_start(struct sock *sk, int backlog)
6096 {
6097 struct sctp_sock *sp = sctp_sk(sk);
6098 struct sctp_endpoint *ep = sp->ep;
6099 struct crypto_hash *tfm = NULL;
6100 char alg[32];
6101
6102 /* Allocate HMAC for generating cookie. */
6103 if (!sp->hmac && sp->sctp_hmac_alg) {
6104 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
6105 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
6106 if (IS_ERR(tfm)) {
6107 net_info_ratelimited("failed to load transform for %s: %ld\n",
6108 sp->sctp_hmac_alg, PTR_ERR(tfm));
6109 return -ENOSYS;
6110 }
6111 sctp_sk(sk)->hmac = tfm;
6112 }
6113
6114 /*
6115 * If a bind() or sctp_bindx() is not called prior to a listen()
6116 * call that allows new associations to be accepted, the system
6117 * picks an ephemeral port and will choose an address set equivalent
6118 * to binding with a wildcard address.
6119 *
6120 * This is not currently spelled out in the SCTP sockets
6121 * extensions draft, but follows the practice as seen in TCP
6122 * sockets.
6123 *
6124 */
6125 sk->sk_state = SCTP_SS_LISTENING;
6126 if (!ep->base.bind_addr.port) {
6127 if (sctp_autobind(sk))
6128 return -EAGAIN;
6129 } else {
6130 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
6131 sk->sk_state = SCTP_SS_CLOSED;
6132 return -EADDRINUSE;
6133 }
6134 }
6135
6136 sk->sk_max_ack_backlog = backlog;
6137 sctp_hash_endpoint(ep);
6138 return 0;
6139 }
6140
6141 /*
6142 * 4.1.3 / 5.1.3 listen()
6143 *
6144 * By default, new associations are not accepted for UDP style sockets.
6145 * An application uses listen() to mark a socket as being able to
6146 * accept new associations.
6147 *
6148 * On TCP style sockets, applications use listen() to ready the SCTP
6149 * endpoint for accepting inbound associations.
6150 *
6151 * On both types of endpoints a backlog of '0' disables listening.
6152 *
6153 * Move a socket to LISTENING state.
6154 */
6155 int sctp_inet_listen(struct socket *sock, int backlog)
6156 {
6157 struct sock *sk = sock->sk;
6158 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6159 int err = -EINVAL;
6160
6161 if (unlikely(backlog < 0))
6162 return err;
6163
6164 lock_sock(sk);
6165
6166 /* Peeled-off sockets are not allowed to listen(). */
6167 if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
6168 goto out;
6169
6170 if (sock->state != SS_UNCONNECTED)
6171 goto out;
6172
6173 /* If backlog is zero, disable listening. */
6174 if (!backlog) {
6175 if (sctp_sstate(sk, CLOSED))
6176 goto out;
6177
6178 err = 0;
6179 sctp_unhash_endpoint(ep);
6180 sk->sk_state = SCTP_SS_CLOSED;
6181 if (sk->sk_reuse)
6182 sctp_sk(sk)->bind_hash->fastreuse = 1;
6183 goto out;
6184 }
6185
6186 /* If we are already listening, just update the backlog */
6187 if (sctp_sstate(sk, LISTENING))
6188 sk->sk_max_ack_backlog = backlog;
6189 else {
6190 err = sctp_listen_start(sk, backlog);
6191 if (err)
6192 goto out;
6193 }
6194
6195 err = 0;
6196 out:
6197 release_sock(sk);
6198 return err;
6199 }
6200
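/* Example (user-space sketch, <netinet/in.h> and <arpa/inet.h> assumed;
 * port 5000 is arbitrary): on a one-to-many socket, listen() only marks
 * the endpoint as willing to accept new associations, and a later
 * listen(fd, 0) disables that again as described above.
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_INET;
 *	addr.sin_port = htons(5000);
 *	addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 5);
 *
 * Skipping the bind() is also legal; sctp_listen_start() then autobinds an
 * ephemeral port on the wildcard address set.
 */
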
6201 /*
6202 * This function is modeled on the current datagram_poll() and
6203 * tcp_poll(). Note that, based on these implementations, we don't
6204 * lock the socket in this function, even though it seems that,
6205 * ideally, locking or some other mechanisms can be used to ensure
6206 * the integrity of the counters (sndbuf and wmem_alloc) used
6207 * in this place. We assume that we don't need locks either until proven
6208 * otherwise.
6209 *
6210 * Another thing to note is that we include the Async I/O support
6211 * here, again, by modeling the current TCP/UDP code. We don't have
6212 * a good way to test with it yet.
6213 */
6214 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6215 {
6216 struct sock *sk = sock->sk;
6217 struct sctp_sock *sp = sctp_sk(sk);
6218 unsigned int mask;
6219
6220 poll_wait(file, sk_sleep(sk), wait);
6221
6222 /* A TCP-style listening socket becomes readable when the accept queue
6223 * is not empty.
6224 */
6225 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
6226 return (!list_empty(&sp->ep->asocs)) ?
6227 (POLLIN | POLLRDNORM) : 0;
6228
6229 mask = 0;
6230
6231 /* Is there any exceptional events? */
6232 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
6233 mask |= POLLERR |
6234 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
6235 if (sk->sk_shutdown & RCV_SHUTDOWN)
6236 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
6237 if (sk->sk_shutdown == SHUTDOWN_MASK)
6238 mask |= POLLHUP;
6239
6240 /* Is it readable? Reconsider this code with TCP-style support. */
6241 if (!skb_queue_empty(&sk->sk_receive_queue))
6242 mask |= POLLIN | POLLRDNORM;
6243
6244 /* The association is either gone or not ready. */
6245 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
6246 return mask;
6247
6248 /* Is it writable? */
6249 if (sctp_writeable(sk)) {
6250 mask |= POLLOUT | POLLWRNORM;
6251 } else {
6252 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
6253 /*
6254 * Since the socket is not locked, the buffer
6255 * might be made available after the writeable check and
6256 * before the bit is set. This could cause a lost I/O
6257 * signal. tcp_poll() has a race breaker for this race
6258 * condition. Based on their implementation, we put
6259 * in the following code to cover it as well.
6260 */
6261 if (sctp_writeable(sk))
6262 mask |= POLLOUT | POLLWRNORM;
6263 }
6264 return mask;
6265 }
6266
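/* Example (user-space sketch, <poll.h> assumed): a TCP-style listening
 * socket only reports POLLIN once an association is ready to be accepted,
 * matching the early return above; "listen_fd" is an assumed listening
 * SOCK_STREAM SCTP socket.
 *
 *	struct pollfd pfd = { .fd = listen_fd, .events = POLLIN };
 *	int new_fd = -1;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		new_fd = accept(listen_fd, NULL, NULL);
 */
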
6267 /********************************************************************
6268 * 2nd Level Abstractions
6269 ********************************************************************/
6270
6271 static struct sctp_bind_bucket *sctp_bucket_create(
6272 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
6273 {
6274 struct sctp_bind_bucket *pp;
6275
6276 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
6277 if (pp) {
6278 SCTP_DBG_OBJCNT_INC(bind_bucket);
6279 pp->port = snum;
6280 pp->fastreuse = 0;
6281 INIT_HLIST_HEAD(&pp->owner);
6282 pp->net = net;
6283 hlist_add_head(&pp->node, &head->chain);
6284 }
6285 return pp;
6286 }
6287
6288 /* Caller must hold hashbucket lock for this tb with local BH disabled */
6289 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
6290 {
6291 if (pp && hlist_empty(&pp->owner)) {
6292 __hlist_del(&pp->node);
6293 kmem_cache_free(sctp_bucket_cachep, pp);
6294 SCTP_DBG_OBJCNT_DEC(bind_bucket);
6295 }
6296 }
6297
6298 /* Release this socket's reference to a local port. */
6299 static inline void __sctp_put_port(struct sock *sk)
6300 {
6301 struct sctp_bind_hashbucket *head =
6302 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
6303 inet_sk(sk)->inet_num)];
6304 struct sctp_bind_bucket *pp;
6305
6306 spin_lock(&head->lock);
6307 pp = sctp_sk(sk)->bind_hash;
6308 __sk_del_bind_node(sk);
6309 sctp_sk(sk)->bind_hash = NULL;
6310 inet_sk(sk)->inet_num = 0;
6311 sctp_bucket_destroy(pp);
6312 spin_unlock(&head->lock);
6313 }
6314
6315 void sctp_put_port(struct sock *sk)
6316 {
6317 local_bh_disable();
6318 __sctp_put_port(sk);
6319 local_bh_enable();
6320 }
6321
6322 /*
6323 * The system picks an ephemeral port and chooses an address set equivalent
6324 * to binding with a wildcard address.
6325 * One of those addresses will be the primary address for the association.
6326 * This automatically enables the multihoming capability of SCTP.
6327 */
6328 static int sctp_autobind(struct sock *sk)
6329 {
6330 union sctp_addr autoaddr;
6331 struct sctp_af *af;
6332 __be16 port;
6333
6334 /* Initialize a local sockaddr structure to INADDR_ANY. */
6335 af = sctp_sk(sk)->pf->af;
6336
6337 port = htons(inet_sk(sk)->inet_num);
6338 af->inaddr_any(&autoaddr, port);
6339
6340 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
6341 }
6342
6343 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
6344 *
6345 * From RFC 2292
6346 * 4.2 The cmsghdr Structure *
6347 *
6348 * When ancillary data is sent or received, any number of ancillary data
6349 * objects can be specified by the msg_control and msg_controllen members of
6350 * the msghdr structure, because each object is preceded by
6351 * a cmsghdr structure defining the object's length (the cmsg_len member).
6352 * Historically Berkeley-derived implementations have passed only one object
6353 * at a time, but this API allows multiple objects to be
6354 * passed in a single call to sendmsg() or recvmsg(). The following example
6355 * shows two ancillary data objects in a control buffer.
6356 *
6357 * |<--------------------------- msg_controllen -------------------------->|
6358 * | |
6359 *
6360 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
6361 *
6362 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
6363 * | | |
6364 *
6365 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
6366 *
6367 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
6368 * | | | | |
6369 *
6370 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
6371 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
6372 *
6373 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
6374 *
6375 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
6376 * ^
6377 * |
6378 *
6379 * msg_control
6380 * points here
6381 */
6382 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
6383 {
6384 struct cmsghdr *cmsg;
6385 struct msghdr *my_msg = (struct msghdr *)msg;
6386
6387 for (cmsg = CMSG_FIRSTHDR(msg);
6388 cmsg != NULL;
6389 cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
6390 if (!CMSG_OK(my_msg, cmsg))
6391 return -EINVAL;
6392
6393 /* Should we parse this header or ignore? */
6394 if (cmsg->cmsg_level != IPPROTO_SCTP)
6395 continue;
6396
6397 /* Strictly check lengths following example in SCM code. */
6398 switch (cmsg->cmsg_type) {
6399 case SCTP_INIT:
6400 /* SCTP Socket API Extension
6401 * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
6402 *
6403 * This cmsghdr structure provides information for
6404 * initializing new SCTP associations with sendmsg().
6405 * The SCTP_INITMSG socket option uses this same data
6406 * structure. This structure is not used for
6407 * recvmsg().
6408 *
6409 * cmsg_level cmsg_type cmsg_data[]
6410 * ------------ ------------ ----------------------
6411 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
6412 */
6413 if (cmsg->cmsg_len !=
6414 CMSG_LEN(sizeof(struct sctp_initmsg)))
6415 return -EINVAL;
6416 cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg);
6417 break;
6418
6419 case SCTP_SNDRCV:
6420 /* SCTP Socket API Extension
6421 * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV)
6422 *
6423 * This cmsghdr structure specifies SCTP options for
6424 * sendmsg() and describes SCTP header information
6425 * about a received message through recvmsg().
6426 *
6427 * cmsg_level cmsg_type cmsg_data[]
6428 * ------------ ------------ ----------------------
6429 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
6430 */
6431 if (cmsg->cmsg_len !=
6432 CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
6433 return -EINVAL;
6434
6435 cmsgs->info =
6436 (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
6437
6438 /* Minimally, validate the sinfo_flags. */
6439 if (cmsgs->info->sinfo_flags &
6440 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6441 SCTP_ABORT | SCTP_EOF))
6442 return -EINVAL;
6443 break;
6444
6445 default:
6446 return -EINVAL;
6447 }
6448 }
6449 return 0;
6450 }
6451
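/* Example (user-space sketch) of an SCTP_SNDRCV ancillary object that this
 * parser accepts: cmsg_len must be exactly
 * CMSG_LEN(sizeof(struct sctp_sndrcvinfo)) and sinfo_flags may only
 * combine SCTP_UNORDERED, SCTP_ADDR_OVER, SCTP_ABORT and SCTP_EOF.
 *
 *	char payload[] = "hello";
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 *	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *	struct sctp_sndrcvinfo *sinfo;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	memset(cbuf, 0, sizeof(cbuf));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDRCV;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *	sinfo->sinfo_stream = 1;
 *	sinfo->sinfo_flags = SCTP_UNORDERED;
 *
 *	sendmsg(fd, &msg, 0);
 */
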
6452 /*
6453 * Wait for a packet..
6454 * Note: This function is the same function as in core/datagram.c
6455 * with a few modifications to make lksctp work.
6456 */
6457 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
6458 {
6459 int error;
6460 DEFINE_WAIT(wait);
6461
6462 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
6463
6464 /* Socket errors? */
6465 error = sock_error(sk);
6466 if (error)
6467 goto out;
6468
6469 if (!skb_queue_empty(&sk->sk_receive_queue))
6470 goto ready;
6471
6472 /* Socket shut down? */
6473 if (sk->sk_shutdown & RCV_SHUTDOWN)
6474 goto out;
6475
6476 /* Sequenced packets can come disconnected. If so we report the
6477 * problem.
6478 */
6479 error = -ENOTCONN;
6480
6481 /* Is there a good reason to think that we may receive some data? */
6482 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
6483 goto out;
6484
6485 /* Handle signals. */
6486 if (signal_pending(current))
6487 goto interrupted;
6488
6489 /* Let another process have a go, since we are going to sleep
6490 * anyway. Note: This may cause odd behaviors if the message
6491 * does not fit in the user's buffer, but this seems to be the
6492 * only way to honor MSG_DONTWAIT realistically.
6493 */
6494 release_sock(sk);
6495 *timeo_p = schedule_timeout(*timeo_p);
6496 lock_sock(sk);
6497
6498 ready:
6499 finish_wait(sk_sleep(sk), &wait);
6500 return 0;
6501
6502 interrupted:
6503 error = sock_intr_errno(*timeo_p);
6504
6505 out:
6506 finish_wait(sk_sleep(sk), &wait);
6507 *err = error;
6508 return error;
6509 }
6510
6511 /* Receive a datagram.
6512 * Note: This is pretty much the same routine as in core/datagram.c
6513 * with a few changes to make lksctp work.
6514 */
6515 static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6516 int noblock, int *err)
6517 {
6518 int error;
6519 struct sk_buff *skb;
6520 long timeo;
6521
6522 timeo = sock_rcvtimeo(sk, noblock);
6523
6524 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
6525 MAX_SCHEDULE_TIMEOUT);
6526
6527 do {
6528 /* Again only user level code calls this function,
6529 * so nothing interrupt level
6530 * will suddenly eat the receive_queue.
6531 *
6532 * Look at current nfs client by the way...
6533 * However, this function was correct in any case. 8)
6534 */
6535 if (flags & MSG_PEEK) {
6536 spin_lock_bh(&sk->sk_receive_queue.lock);
6537 skb = skb_peek(&sk->sk_receive_queue);
6538 if (skb)
6539 atomic_inc(&skb->users);
6540 spin_unlock_bh(&sk->sk_receive_queue.lock);
6541 } else {
6542 skb = skb_dequeue(&sk->sk_receive_queue);
6543 }
6544
6545 if (skb)
6546 return skb;
6547
6548 /* Caller is allowed not to check sk->sk_err before calling. */
6549 error = sock_error(sk);
6550 if (error)
6551 goto no_packet;
6552
6553 if (sk->sk_shutdown & RCV_SHUTDOWN)
6554 break;
6555
6556 /* User doesn't want to wait. */
6557 error = -EAGAIN;
6558 if (!timeo)
6559 goto no_packet;
6560 } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
6561
6562 return NULL;
6563
6564 no_packet:
6565 *err = error;
6566 return NULL;
6567 }
6568
6569 /* If sndbuf has changed, wake up per association sndbuf waiters. */
6570 static void __sctp_write_space(struct sctp_association *asoc)
6571 {
6572 struct sock *sk = asoc->base.sk;
6573 struct socket *sock = sk->sk_socket;
6574
6575 if ((sctp_wspace(asoc) > 0) && sock) {
6576 if (waitqueue_active(&asoc->wait))
6577 wake_up_interruptible(&asoc->wait);
6578
6579 if (sctp_writeable(sk)) {
6580 wait_queue_head_t *wq = sk_sleep(sk);
6581
6582 if (wq && waitqueue_active(wq))
6583 wake_up_interruptible(wq);
6584
6585 /* Note that we try to include the Async I/O support
6586 * here by modeling from the current TCP/UDP code.
6587 * We have not tested with it yet.
6588 */
6589 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
6590 sock_wake_async(sock,
6591 SOCK_WAKE_SPACE, POLL_OUT);
6592 }
6593 }
6594 }
6595
6596 static void sctp_wake_up_waiters(struct sock *sk,
6597 struct sctp_association *asoc)
6598 {
6599 struct sctp_association *tmp = asoc;
6600
6601 /* We do accounting for the sndbuf space per association,
6602 * so we only need to wake our own association.
6603 */
6604 if (asoc->ep->sndbuf_policy)
6605 return __sctp_write_space(asoc);
6606
6607 /* If association goes down and is just flushing its
6608 * outq, then just normally notify others.
6609 */
6610 if (asoc->base.dead)
6611 return sctp_write_space(sk);
6612
6613 /* Accounting for the sndbuf space is per socket, so we
6614 * need to wake up others, try to be fair and in case of
6615 * other associations, let them have a go first instead
6616 * of just doing a sctp_write_space() call.
6617 *
6618 * Note that we reach sctp_wake_up_waiters() only when
6619 * associations free up queued chunks, thus we are under
6620 * lock and the list of associations on a socket is
6621 * guaranteed not to change.
6622 */
6623 for (tmp = list_next_entry(tmp, asocs); 1;
6624 tmp = list_next_entry(tmp, asocs)) {
6625 /* Manually skip the head element. */
6626 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
6627 continue;
6628 /* Wake up association. */
6629 __sctp_write_space(tmp);
6630 /* We've reached the end. */
6631 if (tmp == asoc)
6632 break;
6633 }
6634 }
6635
6636 /* Do accounting for the sndbuf space.
6637 * Decrement the used sndbuf space of the corresponding association by the
6638 * data size which was just transmitted(freed).
6639 */
6640 static void sctp_wfree(struct sk_buff *skb)
6641 {
6642 struct sctp_association *asoc;
6643 struct sctp_chunk *chunk;
6644 struct sock *sk;
6645
6646 /* Get the saved chunk pointer. */
6647 chunk = *((struct sctp_chunk **)(skb->cb));
6648 asoc = chunk->asoc;
6649 sk = asoc->base.sk;
6650 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
6651 sizeof(struct sk_buff) +
6652 sizeof(struct sctp_chunk);
6653
6654 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
6655
6656 /*
6657 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
6658 */
6659 sk->sk_wmem_queued -= skb->truesize;
6660 sk_mem_uncharge(sk, skb->truesize);
6661
6662 sock_wfree(skb);
6663 sctp_wake_up_waiters(sk, asoc);
6664
6665 sctp_association_put(asoc);
6666 }
6667
6668 /* Do accounting for the receive space on the socket.
6669 * Accounting for the association is done in ulpevent.c
6670 * We set this as a destructor for the cloned data skbs so that
6671 * accounting is done at the correct time.
6672 */
6673 void sctp_sock_rfree(struct sk_buff *skb)
6674 {
6675 struct sock *sk = skb->sk;
6676 struct sctp_ulpevent *event = sctp_skb2event(skb);
6677
6678 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
6679
6680 /*
6681 * Mimic the behavior of sock_rfree
6682 */
6683 sk_mem_uncharge(sk, event->rmem_len);
6684 }
6685
6686
6687 /* Helper function to wait for space in the sndbuf. */
6688 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
6689 size_t msg_len)
6690 {
6691 struct sock *sk = asoc->base.sk;
6692 int err = 0;
6693 long current_timeo = *timeo_p;
6694 DEFINE_WAIT(wait);
6695
6696 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
6697 *timeo_p, msg_len);
6698
6699 /* Increment the association's refcnt. */
6700 sctp_association_hold(asoc);
6701
6702 /* Wait on the association specific sndbuf space. */
6703 for (;;) {
6704 prepare_to_wait_exclusive(&asoc->wait, &wait,
6705 TASK_INTERRUPTIBLE);
6706 if (!*timeo_p)
6707 goto do_nonblock;
6708 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
6709 asoc->base.dead)
6710 goto do_error;
6711 if (signal_pending(current))
6712 goto do_interrupted;
6713 if (msg_len <= sctp_wspace(asoc))
6714 break;
6715
6716 /* Let another process have a go, since we are going
6717 * to sleep anyway.
6718 */
6719 release_sock(sk);
6720 current_timeo = schedule_timeout(current_timeo);
6721 BUG_ON(sk != asoc->base.sk);
6722 lock_sock(sk);
6723
6724 *timeo_p = current_timeo;
6725 }
6726
6727 out:
6728 finish_wait(&asoc->wait, &wait);
6729
6730 /* Release the association's refcnt. */
6731 sctp_association_put(asoc);
6732
6733 return err;
6734
6735 do_error:
6736 err = -EPIPE;
6737 goto out;
6738
6739 do_interrupted:
6740 err = sock_intr_errno(*timeo_p);
6741 goto out;
6742
6743 do_nonblock:
6744 err = -EAGAIN;
6745 goto out;
6746 }
6747
6748 void sctp_data_ready(struct sock *sk)
6749 {
6750 struct socket_wq *wq;
6751
6752 rcu_read_lock();
6753 wq = rcu_dereference(sk->sk_wq);
6754 if (wq_has_sleeper(wq))
6755 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
6756 POLLRDNORM | POLLRDBAND);
6757 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
6758 rcu_read_unlock();
6759 }
6760
6761 /* If socket sndbuf has changed, wake up all per association waiters. */
6762 void sctp_write_space(struct sock *sk)
6763 {
6764 struct sctp_association *asoc;
6765
6766 /* Wake up the tasks in each wait queue. */
6767 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
6768 __sctp_write_space(asoc);
6769 }
6770 }
6771
6772 /* Is there any sndbuf space available on the socket?
6773 *
6774 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
6775 * associations on the same socket. For a UDP-style socket with
6776 * multiple associations, it is possible for it to be "unwriteable"
6777 * prematurely. I assume that this is acceptable because
6778 * a premature "unwriteable" is better than an accidental "writeable" which
6779 * would cause an unwanted block under certain circumstances. For the 1-1
6780 * UDP-style sockets or TCP-style sockets, this code should work.
6781 * - Daisy
6782 */
6783 static int sctp_writeable(struct sock *sk)
6784 {
6785 int amt = 0;
6786
6787 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
6788 if (amt < 0)
6789 amt = 0;
6790 return amt;
6791 }
6792
6793 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
6794 * returns immediately with EINPROGRESS.
6795 */
6796 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
6797 {
6798 struct sock *sk = asoc->base.sk;
6799 int err = 0;
6800 long current_timeo = *timeo_p;
6801 DEFINE_WAIT(wait);
6802
6803 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
6804
6805 /* Increment the association's refcnt. */
6806 sctp_association_hold(asoc);
6807
6808 for (;;) {
6809 prepare_to_wait_exclusive(&asoc->wait, &wait,
6810 TASK_INTERRUPTIBLE);
6811 if (!*timeo_p)
6812 goto do_nonblock;
6813 if (sk->sk_shutdown & RCV_SHUTDOWN)
6814 break;
6815 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
6816 asoc->base.dead)
6817 goto do_error;
6818 if (signal_pending(current))
6819 goto do_interrupted;
6820
6821 if (sctp_state(asoc, ESTABLISHED))
6822 break;
6823
6824 /* Let another process have a go, since we are going
6825 * to sleep anyway.
6826 */
6827 release_sock(sk);
6828 current_timeo = schedule_timeout(current_timeo);
6829 lock_sock(sk);
6830
6831 *timeo_p = current_timeo;
6832 }
6833
6834 out:
6835 finish_wait(&asoc->wait, &wait);
6836
6837 /* Release the association's refcnt. */
6838 sctp_association_put(asoc);
6839
6840 return err;
6841
6842 do_error:
6843 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
6844 err = -ETIMEDOUT;
6845 else
6846 err = -ECONNREFUSED;
6847 goto out;
6848
6849 do_interrupted:
6850 err = sock_intr_errno(*timeo_p);
6851 goto out;
6852
6853 do_nonblock:
6854 err = -EINPROGRESS;
6855 goto out;
6856 }
6857
6858 static int sctp_wait_for_accept(struct sock *sk, long timeo)
6859 {
6860 struct sctp_endpoint *ep;
6861 int err = 0;
6862 DEFINE_WAIT(wait);
6863
6864 ep = sctp_sk(sk)->ep;
6865
6866
6867 for (;;) {
6868 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
6869 TASK_INTERRUPTIBLE);
6870
6871 if (list_empty(&ep->asocs)) {
6872 release_sock(sk);
6873 timeo = schedule_timeout(timeo);
6874 lock_sock(sk);
6875 }
6876
6877 err = -EINVAL;
6878 if (!sctp_sstate(sk, LISTENING))
6879 break;
6880
6881 err = 0;
6882 if (!list_empty(&ep->asocs))
6883 break;
6884
6885 err = sock_intr_errno(timeo);
6886 if (signal_pending(current))
6887 break;
6888
6889 err = -EAGAIN;
6890 if (!timeo)
6891 break;
6892 }
6893
6894 finish_wait(sk_sleep(sk), &wait);
6895
6896 return err;
6897 }
6898
6899 static void sctp_wait_for_close(struct sock *sk, long timeout)
6900 {
6901 DEFINE_WAIT(wait);
6902
6903 do {
6904 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
6905 if (list_empty(&sctp_sk(sk)->ep->asocs))
6906 break;
6907 release_sock(sk);
6908 timeout = schedule_timeout(timeout);
6909 lock_sock(sk);
6910 } while (!signal_pending(current) && timeout);
6911
6912 finish_wait(sk_sleep(sk), &wait);
6913 }
6914
6915 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
6916 {
6917 struct sk_buff *frag;
6918
6919 if (!skb->data_len)
6920 goto done;
6921
6922 /* Don't forget the fragments. */
6923 skb_walk_frags(skb, frag)
6924 sctp_skb_set_owner_r_frag(frag, sk);
6925
6926 done:
6927 sctp_skb_set_owner_r(skb, sk);
6928 }
6929
6930 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
6931 struct sctp_association *asoc)
6932 {
6933 struct inet_sock *inet = inet_sk(sk);
6934 struct inet_sock *newinet;
6935
6936 newsk->sk_type = sk->sk_type;
6937 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
6938 newsk->sk_flags = sk->sk_flags;
6939 newsk->sk_no_check = sk->sk_no_check;
6940 newsk->sk_reuse = sk->sk_reuse;
6941
6942 newsk->sk_shutdown = sk->sk_shutdown;
6943 newsk->sk_destruct = sctp_destruct_sock;
6944 newsk->sk_family = sk->sk_family;
6945 newsk->sk_protocol = IPPROTO_SCTP;
6946 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
6947 newsk->sk_sndbuf = sk->sk_sndbuf;
6948 newsk->sk_rcvbuf = sk->sk_rcvbuf;
6949 newsk->sk_lingertime = sk->sk_lingertime;
6950 newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
6951 newsk->sk_sndtimeo = sk->sk_sndtimeo;
6952
6953 newinet = inet_sk(newsk);
6954
6955 /* Initialize sk's sport, dport, rcv_saddr and daddr for
6956 * getsockname() and getpeername()
6957 */
6958 newinet->inet_sport = inet->inet_sport;
6959 newinet->inet_saddr = inet->inet_saddr;
6960 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
6961 newinet->inet_dport = htons(asoc->peer.port);
6962 newinet->pmtudisc = inet->pmtudisc;
6963 newinet->inet_id = asoc->next_tsn ^ jiffies;
6964
6965 newinet->uc_ttl = inet->uc_ttl;
6966 newinet->mc_loop = 1;
6967 newinet->mc_ttl = 1;
6968 newinet->mc_index = 0;
6969 newinet->mc_list = NULL;
6970 }
6971
6972 /* Populate the fields of the newsk from the oldsk and migrate the assoc
6973 * and its messages to the newsk.
6974 */
6975 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6976 struct sctp_association *assoc,
6977 sctp_socket_type_t type)
6978 {
6979 struct sctp_sock *oldsp = sctp_sk(oldsk);
6980 struct sctp_sock *newsp = sctp_sk(newsk);
6981 struct sctp_bind_bucket *pp; /* hash list port iterator */
6982 struct sctp_endpoint *newep = newsp->ep;
6983 struct sk_buff *skb, *tmp;
6984 struct sctp_ulpevent *event;
6985 struct sctp_bind_hashbucket *head;
6986 struct list_head tmplist;
6987
6988 /* Migrate socket buffer sizes and all the socket level options to the
6989 * new socket.
6990 */
6991 newsk->sk_sndbuf = oldsk->sk_sndbuf;
6992 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
6993 /* Brute force copy old sctp opt. */
6994 if (oldsp->do_auto_asconf) {
6995 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
6996 inet_sk_copy_descendant(newsk, oldsk);
6997 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
6998 } else
6999 inet_sk_copy_descendant(newsk, oldsk);
7000
7001 /* Restore the ep value that was overwritten with the above structure
7002 * copy.
7003 */
7004 newsp->ep = newep;
7005 newsp->hmac = NULL;
7006
7007 /* Hook this new socket in to the bind_hash list. */
7008 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
7009 inet_sk(oldsk)->inet_num)];
7010 local_bh_disable();
7011 spin_lock(&head->lock);
7012 pp = sctp_sk(oldsk)->bind_hash;
7013 sk_add_bind_node(newsk, &pp->owner);
7014 sctp_sk(newsk)->bind_hash = pp;
7015 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
7016 spin_unlock(&head->lock);
7017 local_bh_enable();
7018
7019 /* Copy the bind_addr list from the original endpoint to the new
7020 * endpoint so that we can handle restarts properly
7021 */
7022 sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
7023 &oldsp->ep->base.bind_addr, GFP_KERNEL);
7024
7025 /* Move any messages in the old socket's receive queue that are for the
7026 * peeled off association to the new socket's receive queue.
7027 */
7028 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
7029 event = sctp_skb2event(skb);
7030 if (event->asoc == assoc) {
7031 __skb_unlink(skb, &oldsk->sk_receive_queue);
7032 __skb_queue_tail(&newsk->sk_receive_queue, skb);
7033 sctp_skb_set_owner_r_frag(skb, newsk);
7034 }
7035 }
7036
7037 /* Clean up any messages pending delivery due to partial
7038 * delivery. Three cases:
7039 * 1) No partial deliver; no work.
7040 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
7041 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
7042 */
7043 skb_queue_head_init(&newsp->pd_lobby);
7044 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
7045
7046 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
7047 struct sk_buff_head *queue;
7048
7049 /* Decide which queue to move pd_lobby skbs to. */
7050 if (assoc->ulpq.pd_mode) {
7051 queue = &newsp->pd_lobby;
7052 } else
7053 queue = &newsk->sk_receive_queue;
7054
7055 /* Walk through the pd_lobby, looking for skbs that
7056 * need to be moved to the new socket.
7057 */
7058 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
7059 event = sctp_skb2event(skb);
7060 if (event->asoc == assoc) {
7061 __skb_unlink(skb, &oldsp->pd_lobby);
7062 __skb_queue_tail(queue, skb);
7063 sctp_skb_set_owner_r_frag(skb, newsk);
7064 }
7065 }
7066
7067 /* Clear up any skbs waiting for the partial
7068 * delivery to finish.
7069 */
7070 if (assoc->ulpq.pd_mode)
7071 sctp_clear_pd(oldsk, NULL);
7072
7073 }
7074
7075 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
7076 sctp_skb_set_owner_r_frag(skb, newsk);
7077
7078 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
7079 sctp_skb_set_owner_r_frag(skb, newsk);
7080
7081 /* Set the type of socket to indicate that it is peeled off from the
7082 * original UDP-style socket or created with the accept() call on a
7083 * TCP-style socket..
7084 */
7085 newsp->type = type;
7086
7087 /* Mark the new socket "in-use" by the user so that any packets
7088 * that may arrive on the association after we've moved it are
7089 * queued to the backlog. This prevents a potential race between
7090 * backlog processing on the old socket and new-packet processing
7091 * on the new socket.
7092 *
7093 * The caller has just allocated newsk so we can guarantee that other
7094 * paths won't try to lock it and then oldsk.
7095 */
7096 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
7097 sctp_assoc_migrate(assoc, newsk);
7098
7099 /* If the association on the newsk is already closed before accept()
7100 * is called, set RCV_SHUTDOWN flag.
7101 */
7102 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
7103 newsk->sk_shutdown |= RCV_SHUTDOWN;
7104
7105 newsk->sk_state = SCTP_SS_ESTABLISHED;
7106 release_sock(newsk);
7107 }
7108
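/* Example (user-space sketch): this migration path is what ultimately backs
 * a user-level peel-off.  With lksctp-tools the call looks roughly like the
 * following, where "assoc_id" is assumed to come from an SCTP_ASSOC_CHANGE
 * notification on the one-to-many socket "fd".
 *
 *	int assoc_fd = sctp_peeloff(fd, assoc_id);
 *
 *	if (assoc_fd >= 0) {
 *		send(assoc_fd, "hi", 2, 0);
 *		close(assoc_fd);
 *	}
 *
 * The same sctp_sock_migrate() path is also taken when accept() creates a
 * socket for a TCP-style association.
 */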
7109
7110 /* This proto struct describes the ULP interface for SCTP. */
7111 struct proto sctp_prot = {
7112 .name = "SCTP",
7113 .owner = THIS_MODULE,
7114 .close = sctp_close,
7115 .connect = sctp_connect,
7116 .disconnect = sctp_disconnect,
7117 .accept = sctp_accept,
7118 .ioctl = sctp_ioctl,
7119 .init = sctp_init_sock,
7120 .destroy = sctp_destroy_sock,
7121 .shutdown = sctp_shutdown,
7122 .setsockopt = sctp_setsockopt,
7123 .getsockopt = sctp_getsockopt,
7124 .sendmsg = sctp_sendmsg,
7125 .recvmsg = sctp_recvmsg,
7126 .bind = sctp_bind,
7127 .backlog_rcv = sctp_backlog_rcv,
7128 .hash = sctp_hash,
7129 .unhash = sctp_unhash,
7130 .get_port = sctp_get_port,
7131 .obj_size = sizeof(struct sctp_sock),
7132 .sysctl_mem = sysctl_sctp_mem,
7133 .sysctl_rmem = sysctl_sctp_rmem,
7134 .sysctl_wmem = sysctl_sctp_wmem,
7135 .memory_pressure = &sctp_memory_pressure,
7136 .enter_memory_pressure = sctp_enter_memory_pressure,
7137 .memory_allocated = &sctp_memory_allocated,
7138 .sockets_allocated = &sctp_sockets_allocated,
7139 };
7140
7141 #if IS_ENABLED(CONFIG_IPV6)
7142
7143 struct proto sctpv6_prot = {
7144 .name = "SCTPv6",
7145 .owner = THIS_MODULE,
7146 .close = sctp_close,
7147 .connect = sctp_connect,
7148 .disconnect = sctp_disconnect,
7149 .accept = sctp_accept,
7150 .ioctl = sctp_ioctl,
7151 .init = sctp_init_sock,
7152 .destroy = sctp_destroy_sock,
7153 .shutdown = sctp_shutdown,
7154 .setsockopt = sctp_setsockopt,
7155 .getsockopt = sctp_getsockopt,
7156 .sendmsg = sctp_sendmsg,
7157 .recvmsg = sctp_recvmsg,
7158 .bind = sctp_bind,
7159 .backlog_rcv = sctp_backlog_rcv,
7160 .hash = sctp_hash,
7161 .unhash = sctp_unhash,
7162 .get_port = sctp_get_port,
7163 .obj_size = sizeof(struct sctp6_sock),
7164 .sysctl_mem = sysctl_sctp_mem,
7165 .sysctl_rmem = sysctl_sctp_rmem,
7166 .sysctl_wmem = sysctl_sctp_wmem,
7167 .memory_pressure = &sctp_memory_pressure,
7168 .enter_memory_pressure = sctp_enter_memory_pressure,
7169 .memory_allocated = &sctp_memory_allocated,
7170 .sockets_allocated = &sctp_sockets_allocated,
7171 };
7172 #endif /* IS_ENABLED(CONFIG_IPV6) */