/* net/sctp/input.c, as of commit "sctp: Make sysctl tunables per net" */
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle all input from the IP layer into SCTP.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/time.h> /* For struct timeval */
#include <linux/slab.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/snmp.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>

/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      const union sctp_addr *paddr,
				      struct sctp_transport **transportp);
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
						const union sctp_addr *laddr);
static struct sctp_association *__sctp_lookup_association(
					struct net *net,
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt);

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);


/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
{
	struct sctphdr *sh = sctp_hdr(skb);
	__le32 cmp = sh->checksum;
	struct sk_buff *list;
	__le32 val;
	__u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));

	skb_walk_frags(skb, list)
		tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
					tmp);

	val = sctp_end_cksum(tmp);

	if (val != cmp) {
		/* CRC failure, dump it. */
		SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
		return -1;
	}
	return 0;
}

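/* The SCTP input control block lives in skb->cb.  The union at the front
 * matches the IPv4/IPv6 control blocks so the IP layer's own cb state is
 * preserved, and the chunk pointer stashed alongside it lets
 * sctp_backlog_rcv() recover the parsed chunk after the skb has waited on
 * a socket backlog queue.
 */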
struct sctp_input_cb {
	union {
		struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;
#endif
	} header;
	struct sctp_chunk *chunk;
};
#define SCTP_INPUT_CB(__skb)	((struct sctp_input_cb *)&((__skb)->cb[0]))

/*
 * This is the routine which IP calls when receiving an SCTP packet.
 */
int sctp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct sctp_association *asoc;
	struct sctp_endpoint *ep = NULL;
	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
	struct sctp_chunk *chunk;
	struct sctphdr *sh;
	union sctp_addr src;
	union sctp_addr dest;
	int family;
	struct sctp_af *af;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);

	if (skb_linearize(skb))
		goto discard_it;

	sh = sctp_hdr(skb);

	/* Pull up the IP and SCTP headers. */
	__skb_pull(skb, skb_transport_offset(skb));
	if (skb->len < sizeof(struct sctphdr))
		goto discard_it;
	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
	    sctp_rcv_checksum(net, skb) < 0)
		goto discard_it;

	skb_pull(skb, sizeof(struct sctphdr));

	/* Make sure we at least have chunk headers worth of data left. */
	if (skb->len < sizeof(struct sctp_chunkhdr))
		goto discard_it;

	family = ipver2af(ip_hdr(skb)->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto discard_it;

	/* Initialize local addresses for lookups. */
	af->from_skb(&src, skb, 1);
	af->from_skb(&dest, skb, 0);

	/* If the packet is to or from a non-unicast address,
	 * silently discard the packet.
	 *
	 * This is not clearly defined in the RFC except in section
	 * 8.4 - OOTB handling.  However, based on the book "Stream Control
	 * Transmission Protocol" 2.1, "It is important to note that the
	 * IP address of an SCTP transport address must be a routable
	 * unicast address.  In other words, IP multicast addresses and
	 * IP broadcast addresses cannot be used in an SCTP transport
	 * address."
	 */
	if (!af->addr_valid(&src, NULL, skb) ||
	    !af->addr_valid(&dest, NULL, skb))
		goto discard_it;

	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);

	if (!asoc)
		ep = __sctp_rcv_lookup_endpoint(net, &dest);

	/* Retrieve the common input handling substructure. */
	rcvr = asoc ? &asoc->base : &ep->base;
	sk = rcvr->sk;

	/*
	 * If a frame arrives on an interface and the receiving socket is
	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
	 */
	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
		if (asoc) {
			sctp_association_put(asoc);
			asoc = NULL;
		} else {
			sctp_endpoint_put(ep);
			ep = NULL;
		}
		sk = net->sctp.ctl_sock;
		ep = sctp_sk(sk)->ep;
		sctp_endpoint_hold(ep);
		rcvr = &ep->base;
	}

	/*
	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
	 * An SCTP packet is called an "out of the blue" (OOTB)
	 * packet if it is correctly formed, i.e., passed the
	 * receiver's checksum check, but the receiver is not
	 * able to identify the association to which this
	 * packet belongs.
	 */
	if (!asoc) {
		if (sctp_rcv_ootb(skb)) {
			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
			goto discard_release;
		}
	}

	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
		goto discard_release;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_release;

	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk);
	if (!chunk)
		goto discard_release;
	SCTP_INPUT_CB(skb)->chunk = chunk;

	/* Remember what endpoint is to handle this packet. */
	chunk->rcvr = rcvr;

	/* Remember the SCTP header. */
	chunk->sctp_hdr = sh;

	/* Set the source and destination addresses of the incoming chunk. */
	sctp_init_addrs(chunk, &src, &dest);

	/* Remember where we came from. */
	chunk->transport = transport;

	/* Acquire access to the sock lock.  Note: We are safe from other
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	sctp_bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from the rcvr->sk.  This is
		 * because migrate()/accept() may have moved the association
		 * to a new socket and released all the sockets.  So now we
		 * are holding a lock on the old socket while the user may
		 * be doing something with the new socket.  Switch our view
		 * of the current sk.
		 */
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			sctp_bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			skb = NULL; /* sctp_chunk_free already freed the skb */
			goto discard_release;
		}
		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
	} else {
		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	sctp_bh_unlock_sock(sk);

	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	return 0;

discard_it:
	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
	kfree_skb(skb);
	return 0;

discard_release:
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	goto discard_it;
}

/* Process the backlog queue of the socket.  Every skb on
 * the backlog holds a ref on an association or endpoint.
 * We hold this ref throughout the state machine to make
 * sure that the structure we need is still around.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the rcvr is dead then the association or endpoint
	 * has been deleted and we can safely drop the chunk
	 * and refs that we are holding.
	 */
	if (rcvr->dead) {
		sctp_chunk_free(chunk);
		goto done;
	}

	if (unlikely(rcvr->sk != sk)) {
		/* In this case, the association moved from one socket to
		 * another.  We are currently sitting on the backlog of the
		 * old socket, so we need to move.
		 * However, since we are here in the process context we
		 * need to make sure that the user doesn't own
		 * the new socket when we process the packet.
		 * If the new socket is user-owned, queue the chunk to the
		 * backlog of the new socket without dropping any refs.
		 * Otherwise, we can safely push the chunk on the inqueue.
		 */

		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);

		if (sock_owned_by_user(sk)) {
			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
				sctp_chunk_free(chunk);
			else
				backloged = 1;
		} else
			sctp_inq_push(inqueue, chunk);

		sctp_bh_unlock_sock(sk);

		/* If the chunk was backloged again, don't drop refs */
		if (backloged)
			return 0;
	} else {
		sctp_inq_push(inqueue, chunk);
	}

done:
	/* Release the refs we took in sctp_add_backlog */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_put(sctp_assoc(rcvr));
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();

	return 0;
}

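/* Charge the skb to the socket backlog.  On success, take a reference on
 * the chunk's association or endpoint so it cannot go away before
 * sctp_backlog_rcv() runs; on failure the caller is responsible for
 * freeing the chunk.
 */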
static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_ep_common *rcvr = chunk->rcvr;
	int ret;

	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
	if (!ret) {
		/* Hold the assoc/ep while hanging on the backlog queue.
		 * This way, we know structures we need will not disappear
		 * from us.
		 */
		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
			sctp_association_hold(sctp_assoc(rcvr));
		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
			sctp_endpoint_hold(sctp_ep(rcvr));
		else
			BUG();
	}
	return ret;
}

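/* A PMTU update may arrive while the application owns the socket lock.
 * In that case the new value cannot be applied immediately; the transport
 * and association are flagged via pmtu_pending so the update can be
 * applied once the socket lock is no longer held by the user.
 */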
/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
{
	if (!t || (t->pathmtu <= pmtu))
		return;

	if (sock_owned_by_user(sk)) {
		asoc->pmtu_pending = 1;
		t->pmtu_pending = 1;
		return;
	}

	if (t->param_flags & SPP_PMTUD_ENABLE) {
		/* Update the transport's view of the MTU. */
		sctp_transport_update_pmtu(sk, t, pmtu);

		/* Update association pmtu. */
		sctp_assoc_sync_pmtu(sk, asoc);
	}

	/* Retransmit with the new pmtu setting.
	 * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
	 * Needed will never be sent, but if a message was sent before
	 * PMTU discovery was disabled that was larger than the PMTU, it
	 * would not be fragmented, so it must be re-transmitted fragmented.
	 */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}

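/* An ICMP redirect was received for this peer.  If the transport still has
 * a cached route, hand it to the routing layer so subsequent packets use
 * the updated next hop.
 */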
void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
			struct sk_buff *skb)
{
	struct dst_entry *dst;

	if (!t)
		return;
	dst = sctp_transport_dst_check(t);
	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 *
 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 *        or a "Protocol Unreachable" treat this message as an abort
 *        with the T bit set.
 *
 * This function sends an event to the state machine, which will abort the
 * association.
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
				 struct sctp_association *asoc,
				 struct sctp_transport *t)
{
	SCTP_DEBUG_PRINTK("%s\n", __func__);

	if (sock_owned_by_user(sk)) {
		if (timer_pending(&t->proto_unreach_timer))
			return;
		else {
			if (!mod_timer(&t->proto_unreach_timer,
				       jiffies + (HZ/20)))
				sctp_association_hold(asoc);
		}
	} else {
		struct net *net = sock_net(sk);

		if (timer_pending(&t->proto_unreach_timer) &&
		    del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);

		sctp_do_sm(net, SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t,
			   GFP_ATOMIC);
	}
}

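/* On success, sctp_err_lookup() returns the owning socket locked (with
 * bottom halves disabled) and holds a reference on the matching
 * association; callers must release both via sctp_err_finish().
 */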
/* Common lookup code for icmp/icmpv6 error handler. */
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
{
	union sctp_addr saddr;
	union sctp_addr daddr;
	struct sctp_af *af;
	struct sock *sk = NULL;
	struct sctp_association *asoc;
	struct sctp_transport *transport = NULL;
	struct sctp_init_chunk *chunkhdr;
	__u32 vtag = ntohl(sctphdr->vtag);
	int len = skb->len - ((void *)sctphdr - (void *)skb->data);

	*app = NULL; *tpp = NULL;

	af = sctp_get_af_specific(family);
	if (unlikely(!af)) {
		return NULL;
	}

	/* Initialize local addresses for lookups. */
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the incoming ICMP error
	 * packet.
	 */
	asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
	if (!asoc)
		return NULL;

	sk = asoc->base.sk;

	/* RFC 4960, Appendix C. ICMP Handling
	 *
	 * ICMP6) An implementation MUST validate that the Verification Tag
	 * contained in the ICMP message matches the Verification Tag of
	 * the peer.  If the Verification Tag is not 0 and does NOT
	 * match, discard the ICMP message.  If it is 0 and the ICMP
	 * message contains enough bytes to verify that the chunk type is
	 * an INIT chunk and that the Initiate Tag matches the tag of the
	 * peer, continue with ICMP7.  If the ICMP message is too short
	 * or the chunk type or the Initiate Tag does not match, silently
	 * discard the packet.
	 */
	if (vtag == 0) {
		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
			  + sizeof(__be32) ||
		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
			goto out;
		}
	} else if (vtag != asoc->c.peer_vtag) {
		goto out;
	}

	sctp_bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	*app = asoc;
	*tpp = transport;
	return sk;

out:
	if (asoc)
		sctp_association_put(asoc);
	return NULL;
}

/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
	sctp_bh_unlock_sock(sk);
	if (asoc)
		sctp_association_put(asoc);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the sctp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic".  When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void sctp_v4_err(struct sk_buff *skb, __u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int ihlen = iph->ihl * 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	struct inet_sock *inet;
	sk_buff_data_t saveip, savesctp;
	int err;
	struct net *net = dev_net(skb->dev);

	if (skb->len < ihlen + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	/* Fix up skb to look at the embedded net header. */
	saveip = skb->network_header;
	savesctp = skb->transport_header;
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ihlen);
	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
	/* Put back the original values. */
	skb->network_header = saveip;
	skb->transport_header = savesctp;
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	/* Warning: The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */

	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out_unlock;

		/* PMTU discovery (RFC1191) */
		if (ICMP_FRAG_NEEDED == code) {
			sctp_icmp_frag_needed(sk, asoc, transport, info);
			goto out_unlock;
		} else {
			if (ICMP_PROT_UNREACH == code) {
				sctp_icmp_proto_unreachable(sk, asoc,
							    transport);
				goto out_unlock;
			}
		}
		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		/* Ignore any time exceeded errors due to fragment reassembly
		 * timeouts.
		 */
		if (ICMP_EXC_FRAGTIME == code)
			goto out_unlock;

		err = EHOSTUNREACH;
		break;
	case ICMP_REDIRECT:
		sctp_icmp_redirect(sk, transport, skb);
		err = 0;
		break;
	default:
		goto out_unlock;
	}

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out_unlock:
	sctp_err_finish(sk, asoc);
}

/*
 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 *
 * This function scans all the chunks in the OOTB packet to determine if
 * the packet should be discarded right away.  If a response might be needed
 * for this packet, or, if further processing is possible, the packet will
 * be queued to a proper inqueue for the next phase of handling.
 *
 * Output:
 * Return 0 - If further processing is needed.
 * Return 1 - If the packet can be discarded right away.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
	sctp_chunkhdr_t *ch;
	__u8 *ch_end;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* Scan through all the chunks in the packet. */
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
		 * receiver MUST silently discard the OOTB packet and take no
		 * further action.
		 */
		if (SCTP_CID_ABORT == ch->type)
			goto discard;

		/* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
		 * chunk, the receiver should silently discard the packet
		 * and take no further action.
		 */
		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
			goto discard;

		/* RFC 4460, 2.11.2
		 * This will discard packets with INIT chunk bundled as
		 * subsequent chunks in the packet.  When INIT is first,
		 * the normal INIT processing will discard the chunk.
		 */
		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
			goto discard;

		ch = (sctp_chunkhdr_t *) ch_end;
	} while (ch_end < skb_tail_pointer(skb));

	return 0;

discard:
	return 1;
}

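/* The endpoint and association hash tables below are global, but every
 * hash is keyed on the network namespace (together with the local port,
 * and the peer port for associations), so lookups stay per-net.  Writers
 * take the per-bucket write lock with bottom halves disabled; lookups in
 * the receive path only need the read lock.
 */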
/* Insert endpoint into the hash table. */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	struct net *net = sock_net(ep->base.sk);
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &ep->base;

	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
	head = &sctp_ep_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an endpoint to the hash. Local BH-safe. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_hash_endpoint(ep);
	sctp_local_bh_enable();
}

756static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
757{
4cdadcbc 758 struct net *net = sock_net(ep->base.sk);
1da177e4
LT
759 struct sctp_hashbucket *head;
760 struct sctp_ep_common *epb;
761
762 epb = &ep->base;
763
4cdadcbc 764 epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
1da177e4
LT
765
766 head = &sctp_ep_hashtable[epb->hashent];
767
768 sctp_write_lock(&head->lock);
2eebc1e1 769 hlist_del_init(&epb->node);
1da177e4
LT
770 sctp_write_unlock(&head->lock);
771}
772
773/* Remove endpoint from the hash. Local BH-safe. */
774void sctp_unhash_endpoint(struct sctp_endpoint *ep)
775{
776 sctp_local_bh_disable();
777 __sctp_unhash_endpoint(ep);
778 sctp_local_bh_enable();
779}
780
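/* Endpoint lookup never comes back empty: when no bound endpoint matches
 * the local address, the per-namespace control socket's endpoint is
 * returned instead (with a reference held), so OOTB packets still have a
 * receiver to process them.
 */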
/* Look up an endpoint. */
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
						const union sctp_addr *laddr)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_endpoint *ep;
	struct hlist_node *node;
	int hash;

	hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
	head = &sctp_ep_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		ep = sctp_ep(epb);
		if (sctp_endpoint_is_match(ep, net, laddr))
			goto hit;
	}

	ep = sctp_sk(net->sctp.ctl_sock)->ep;

hit:
	sctp_endpoint_hold(ep);
	read_unlock(&head->lock);
	return ep;
}

/* Insert association into the hash table. */
static void __sctp_hash_established(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_ep_common *epb;
	struct sctp_hashbucket *head;

	epb = &asoc->base;

	/* Calculate which chain this entry will belong to. */
	epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
					 asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_add_head(&epb->node, &head->chain);
	sctp_write_unlock(&head->lock);
}

/* Add an association to the hash. Local BH-safe. */
void sctp_hash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_hash_established(asoc);
	sctp_local_bh_enable();
}

/* Remove association from the hash table. */
static void __sctp_unhash_established(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;

	epb = &asoc->base;

	epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
					 asoc->peer.port);

	head = &sctp_assoc_hashtable[epb->hashent];

	sctp_write_lock(&head->lock);
	hlist_del_init(&epb->node);
	sctp_write_unlock(&head->lock);
}

/* Remove association from the hash table.  Local BH-safe. */
void sctp_unhash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_unhash_established(asoc);
	sctp_local_bh_enable();
}

/* Look up an association. */
static struct sctp_association *__sctp_lookup_association(
					struct net *net,
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	struct hlist_node *node;
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
				 ntohs(peer->v4.sin_port));
	head = &sctp_assoc_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, node, &head->chain) {
		asoc = sctp_assoc(epb);
		transport = sctp_assoc_is_match(asoc, net, local, peer);
		if (transport)
			goto hit;
	}

	read_unlock(&head->lock);

	return NULL;

hit:
	*pt = transport;
	sctp_association_hold(asoc);
	read_unlock(&head->lock);
	return asoc;
}

/* Look up an association. BH-safe. */
SCTP_STATIC
struct sctp_association *sctp_lookup_association(struct net *net,
						 const union sctp_addr *laddr,
						 const union sctp_addr *paddr,
						 struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
	sctp_local_bh_enable();

	return asoc;
}

/* Is there an association matching the given local and peer addresses? */
int sctp_has_association(struct net *net,
			 const union sctp_addr *laddr,
			 const union sctp_addr *paddr)
{
	struct sctp_association *asoc;
	struct sctp_transport *transport;

	if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
		sctp_association_put(asoc);
		return 1;
	}

	return 0;
}

/*
 * SCTP Implementors Guide, 2.18 Handling of address
 * parameters within the INIT or INIT-ACK.
 *
 * D) When searching for a matching TCB upon reception of an INIT
 *    or INIT-ACK chunk the receiver SHOULD use not only the
 *    source address of the packet (containing the INIT or
 *    INIT-ACK) but the receiver SHOULD also use all valid
 *    address parameters contained within the chunk.
 *
 * 2.18.3 Solution description
 *
 * This new text clearly specifies to an implementor the need
 * to look within the INIT or INIT-ACK.  Any implementation that
 * does not do this, may not be able to establish associations
 * in certain circumstances.
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
	struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
{
	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = sctp_hdr(skb);
	union sctp_params params;
	sctp_init_chunk_t *init;
	struct sctp_transport *transport;
	struct sctp_af *af;

	/*
	 * This code will NOT touch anything inside the chunk--it is
	 * strictly READ-ONLY.
	 *
	 * RFC 2960 3  SCTP packet Format
	 *
	 * Multiple chunks can be bundled into one SCTP packet up to
	 * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
	 * COMPLETE chunks.  These chunks MUST NOT be bundled with any
	 * other chunk in a packet.  See Section 6.10 for more details
	 * on chunk bundling.
	 */

	/* Find the start of the TLVs and the end of the chunk.  This is
	 * the region we search for address parameters.
	 */
	init = (sctp_init_chunk_t *)skb->data;

	/* Walk the parameters looking for embedded addresses. */
	sctp_walk_params(params, init, init_hdr.params) {

		/* Note: Ignoring hostname addresses. */
		af = sctp_get_af_specific(param_type2af(params.p->type));
		if (!af)
			continue;

		af->from_addr_param(paddr, params.addr, sh->source, 0);

		asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
		if (asoc)
			return asoc;
	}

	return NULL;
}

/* ADD-IP, Section 5.2
 * When an endpoint receives an ASCONF Chunk from the remote peer
 * special procedures may be needed to identify the association the
 * ASCONF Chunk is associated with.  To properly find the association
 * the following procedures SHOULD be followed:
 *
 * D2) If the association is not found, use the address found in the
 * Address Parameter TLV combined with the port number found in the
 * SCTP common header.  If found proceed to rule D4.
 *
 * D2-ext) If more than one ASCONF Chunks are packed together, use the
 * address found in the ASCONF Address Parameter TLV of each of the
 * subsequent ASCONF Chunks.  If found, proceed to rule D4.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
					struct net *net,
					sctp_chunkhdr_t *ch,
					const union sctp_addr *laddr,
					__be16 peer_port,
					struct sctp_transport **transportp)
{
	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
	struct sctp_af *af;
	union sctp_addr_param *param;
	union sctp_addr paddr;

	/* Skip over the ADDIP header and find the Address parameter. */
	param = (union sctp_addr_param *)(asconf + 1);

	af = sctp_get_af_specific(param_type2af(param->p.type));
	if (unlikely(!af))
		return NULL;

	af->from_addr_param(&paddr, param, peer_port, 0);

	return __sctp_lookup_association(net, laddr, &paddr, transportp);
}

/* SCTP-AUTH, Section 6.3:
 *    If the receiver does not find a STCB for a packet containing an AUTH
 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 *    association.
 *
 * This means that any chunks that can help us identify the association need
 * to be looked at to find this association.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc = NULL;
	sctp_chunkhdr_t *ch;
	int have_auth = 0;
	unsigned int chunk_num = 1;
	__u8 *ch_end;

	/* Walk through the chunks looking for AUTH or ASCONF chunks
	 * to help us find the association.
	 */
	ch = (sctp_chunkhdr_t *) skb->data;
	do {
		/* Break out if chunk length is less than minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		switch (ch->type) {
		case SCTP_CID_AUTH:
			have_auth = chunk_num;
			break;

		case SCTP_CID_COOKIE_ECHO:
			/* If a packet arrives containing an AUTH chunk as
			 * a first chunk, a COOKIE-ECHO chunk as the second
			 * chunk, and possibly more chunks after them, and
			 * the receiver does not have an STCB for that
			 * packet, then authentication is based on
			 * the contents of the COOKIE-ECHO chunk.
			 */
			if (have_auth == 1 && chunk_num == 2)
				return NULL;
			break;

		case SCTP_CID_ASCONF:
			if (have_auth || net->sctp.addip_noauth)
				asoc = __sctp_rcv_asconf_lookup(
						net, ch, laddr,
						sctp_hdr(skb)->source,
						transportp);
		default:
			break;
		}

		if (asoc)
			break;

		ch = (sctp_chunkhdr_t *) ch_end;
		chunk_num++;
	} while (ch_end < skb_tail_pointer(skb));

	return asoc;
}

/*
 * There are circumstances when we need to look inside the SCTP packet
 * for information to help us find the association.  Examples
 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
 * chunks.
 */
static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	sctp_chunkhdr_t *ch;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* The code below will attempt to walk the chunk and extract
	 * parameter information.  Before we do that, we need to verify
	 * that the chunk length doesn't cause overflow.  Otherwise, we'll
	 * walk off the end.
	 */
	if (WORD_ROUND(ntohs(ch->length)) > skb->len)
		return NULL;

	/* If this is INIT/INIT-ACK look inside the chunk too. */
	switch (ch->type) {
	case SCTP_CID_INIT:
	case SCTP_CID_INIT_ACK:
		return __sctp_rcv_init_lookup(net, skb, laddr, transportp);

	default:
		return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
	}

	return NULL;
}

/* Lookup an association for an inbound skb. */
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);

	/* Further lookup for INIT/INIT-ACK packets.
	 * SCTP Implementors Guide, 2.18 Handling of address
	 * parameters within the INIT or INIT-ACK.
	 */
	if (!asoc)
		asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);

	return asoc;
}