/*
 *      Internet Control Message Protocol (ICMPv6)
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Based on net/ipv4/icmp.c
 *
 *      RFC 1885
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Changes:
 *
 *      Andi Kleen              :       exception handling
 *      Andi Kleen              :       add rate limits. Never reply to an icmp.
 *                                      Add more length checks and other fixes.
 *      yoshfuji                :       ensure to send parameter problem for
 *                                      fragments.
 *      YOSHIFUJI Hideaki @USAGI:       added sysctl for icmp rate limit.
 *      Randy Dunlap and
 *      YOSHIFUJI Hideaki @USAGI:       Per-interface statistics support
 *      Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/netfilter.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>

#include <net/ip.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>
#include <asm/system.h>

DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6_statistics);
DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
EXPORT_SYMBOL(icmpv6msg_statistics);

/*
 *      The ICMP socket(s). This is the most convenient way to flow control
 *      our ICMP output as well as maintain a clean interface throughout
 *      all layers. All Socketless IP sends will soon be gone.
 *
 *      On SMP we have one ICMP socket per-cpu.
 */
static inline struct sock *icmpv6_sk(struct net *net)
{
        return net->ipv6.icmp_sk[smp_processor_id()];
}

static int icmpv6_rcv(struct sk_buff *skb);

static struct inet6_protocol icmpv6_protocol = {
        .handler        = icmpv6_rcv,
        .flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static __inline__ int icmpv6_xmit_lock(struct sock *sk)
{
        local_bh_disable();

        if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
                /* This can happen if the output path (e.g. SIT or
                 * ip6ip6 tunnel) signals dst_link_failure() for an
                 * outgoing ICMP6 packet.
                 */
                local_bh_enable();
                return 1;
        }
        return 0;
}

static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
        spin_unlock_bh(&sk->sk_lock.slock);
}

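/*
 * The two helpers above bracket every transmit through the per-cpu
 * control socket; when the trylock fails the caller simply drops the
 * error message, which is acceptable behaviour for ICMP.
 */
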
/*
 *      Slightly more convenient version of icmpv6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, int code, int pos)
{
        icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
        kfree_skb(skb);
}
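
/*
 * Typical use, e.g. from extension header parsing when an unknown
 * option has to be reported back to the sender (illustrative call;
 * the offset variable is hypothetical):
 *
 *      icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
 *
 * Note that icmpv6_param_prob() consumes the skb.
 */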

/*
 *      Figure out whether we may reply to this packet with an icmp error.
 *
 *      We do not reply if:
 *        - it was an icmp error message.
 *        - it is truncated, so that it cannot even be determined whether
 *          the protocol is ICMPV6 (i.e. it is cut in the middle of some exthdr)
 *
 *      --ANK (980726)
 */

static int is_ineligible(struct sk_buff *skb)
{
        int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
        int len = skb->len - ptr;
        __u8 nexthdr = ipv6_hdr(skb)->nexthdr;

        if (len < 0)
                return 1;

        ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
        if (ptr < 0)
                return 0;
        if (nexthdr == IPPROTO_ICMPV6) {
                u8 _type, *tp;
                tp = skb_header_pointer(skb,
                        ptr + offsetof(struct icmp6hdr, icmp6_type),
                        sizeof(_type), &_type);
                if (tp == NULL ||
                    !(*tp & ICMPV6_INFOMSG_MASK))
                        return 1;
        }
        return 0;
}

/*
 * Check the ICMP output rate limit
 */
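/*
 * Wider (less specific) routes get a shorter minimum interval between
 * error packets: assuming the default ratelimit of 1000 ms (see the
 * "ratelimit" sysctl at the bottom of this file), a /128 destination
 * keeps the full 1000 ms, a /64 gets 1000 >> 2 = 250 ms and the
 * default route (/0) gets 1000 >> 4 = 62 ms, per the plen-based shift
 * below.
 */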
static inline int icmpv6_xrlim_allow(struct sock *sk, int type,
                                     struct flowi *fl)
{
        struct dst_entry *dst;
        struct net *net = sock_net(sk);
        int res = 0;

        /* Informational messages are not limited. */
        if (type & ICMPV6_INFOMSG_MASK)
                return 1;

        /* Do not limit pmtu discovery, it would break it. */
        if (type == ICMPV6_PKT_TOOBIG)
                return 1;

        /*
         * Look up the output route.
         * XXX: perhaps the expire for routing entries cloned by
         * this lookup should be more aggressive (not longer than timeout).
         */
        dst = ip6_route_output(net, sk, fl);
        if (dst->error) {
                IP6_INC_STATS(ip6_dst_idev(dst),
                              IPSTATS_MIB_OUTNOROUTES);
        } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
                res = 1;
        } else {
                struct rt6_info *rt = (struct rt6_info *)dst;
                int tmo = net->ipv6.sysctl.icmpv6_time;

                /* Give more bandwidth to wider prefixes. */
                if (rt->rt6i_dst.plen < 128)
                        tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

                res = xrlim_allow(dst, tmo);
        }
        dst_release(dst);
        return res;
}

/*
 *      An inline helper for the "simple" if statement below;
 *      checks whether a parameter problem report is caused by an
 *      unrecognized IPv6 option whose Option Type has its two
 *      highest-order bits set to 10.
 */

static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
{
        u8 _optval, *op;

        offset += skb_network_offset(skb);
        op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
        if (op == NULL)
                return 1;
        return (*op & 0xC0) == 0x80;
}

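/*
 * Fill in the ICMPv6 header on the queued data and complete the
 * checksum before handing the frame(s) to ip6_push_pending_frames().
 * For a single queued skb the partial checksum accumulated by
 * icmpv6_getfrag() is simply extended over the ICMPv6 header; with
 * several queued skbs the per-skb checksums are folded together first.
 */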
static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
{
        struct sk_buff *skb;
        struct icmp6hdr *icmp6h;
        int err = 0;

        if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
                goto out;

        icmp6h = icmp6_hdr(skb);
        memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
        icmp6h->icmp6_cksum = 0;

        if (skb_queue_len(&sk->sk_write_queue) == 1) {
                skb->csum = csum_partial((char *)icmp6h,
                                        sizeof(struct icmp6hdr), skb->csum);
                icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
                                                      &fl->fl6_dst,
                                                      len, fl->proto,
                                                      skb->csum);
        } else {
                __wsum tmp_csum = 0;

                skb_queue_walk(&sk->sk_write_queue, skb) {
                        tmp_csum = csum_add(tmp_csum, skb->csum);
                }

                tmp_csum = csum_partial((char *)icmp6h,
                                        sizeof(struct icmp6hdr), tmp_csum);
                icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
                                                      &fl->fl6_dst,
                                                      len, fl->proto,
                                                      tmp_csum);
        }
        ip6_push_pending_frames(sk);
out:
        return err;
}

struct icmpv6_msg {
        struct sk_buff  *skb;
        int             offset;
        uint8_t         type;
};

static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
        struct sk_buff *org_skb = msg->skb;
        __wsum csum = 0;

        csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
                                      to, len, csum);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        if (!(msg->type & ICMPV6_INFOMSG_MASK))
                nf_ct_attach(skb, org_skb);
        return 0;
}

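/*
 * Mobile IPv6 (RFC 3775): if the offending packet carried a Home
 * Address destination option, swap the IPv6 source address with the
 * address stored in the HAO before building the error, so that the
 * quoted packet and the error's destination reflect the addresses as
 * they appeared on the wire (a brief summary of the intent; the code
 * below only performs the swap).
 */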
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static void mip6_addr_swap(struct sk_buff *skb)
{
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct inet6_skb_parm *opt = IP6CB(skb);
        struct ipv6_destopt_hao *hao;
        struct in6_addr tmp;
        int off;

        if (opt->dsthao) {
                off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
                if (likely(off >= 0)) {
                        hao = (struct ipv6_destopt_hao *)
                                        (skb_network_header(skb) + off);
                        ipv6_addr_copy(&tmp, &iph->saddr);
                        ipv6_addr_copy(&iph->saddr, &hao->addr);
                        ipv6_addr_copy(&hao->addr, &tmp);
                }
        }
}
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif

/*
 *      Send an ICMP message in response to a packet in error
 */
void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
                 struct net_device *dev)
{
        struct net *net = dev_net(skb->dev);
        struct inet6_dev *idev = NULL;
        struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct sock *sk;
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
        struct dst_entry *dst;
        struct dst_entry *dst2;
        struct icmp6hdr tmp_hdr;
        struct flowi fl;
        struct flowi fl2;
        struct icmpv6_msg msg;
        int iif = 0;
        int addr_type = 0;
        int len;
        int hlimit, tclass;
        int err = 0;

        if ((u8 *)hdr < skb->head ||
            (skb->network_header + sizeof(*hdr)) > skb->tail)
                return;

        /*
         *      Make sure we respect the rules
         *      i.e. RFC 1885 2.4(e)
         *      Rule (e.1) is enforced by not using icmpv6_send
         *      in any code that processes icmp errors.
         */
        addr_type = ipv6_addr_type(&hdr->daddr);

        if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
                saddr = &hdr->daddr;

        /*
         *      Dest addr check
         */

        if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
                if (type != ICMPV6_PKT_TOOBIG &&
                    !(type == ICMPV6_PARAMPROB &&
                      code == ICMPV6_UNK_OPTION &&
                      (opt_unrec(skb, info))))
                        return;

                saddr = NULL;
        }

        addr_type = ipv6_addr_type(&hdr->saddr);

        /*
         *      Source addr check
         */

        if (addr_type & IPV6_ADDR_LINKLOCAL)
                iif = skb->dev->ifindex;

        /*
         *      Must not send an error if the source does not uniquely
         *      identify a single node (RFC2463 Section 2.4).
         *      We check unspecified / multicast addresses here,
         *      and anycast addresses will be checked later.
         */
        if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
                return;
        }

        /*
         *      Never answer to an ICMP error message.
         */
        if (is_ineligible(skb)) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
                return;
        }

        mip6_addr_swap(skb);

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_ICMPV6;
        ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
        if (saddr)
                ipv6_addr_copy(&fl.fl6_src, saddr);
        fl.oif = iif;
        fl.fl_icmp_type = type;
        fl.fl_icmp_code = code;
        security_skb_classify_flow(skb, &fl);

        sk = icmpv6_sk(net);
        np = inet6_sk(sk);

        if (icmpv6_xmit_lock(sk))
                return;

        if (!icmpv6_xrlim_allow(sk, type, &fl))
                goto out;

        tmp_hdr.icmp6_type = type;
        tmp_hdr.icmp6_code = code;
        tmp_hdr.icmp6_cksum = 0;
        tmp_hdr.icmp6_pointer = htonl(info);

        if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
                fl.oif = np->mcast_oif;

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto out;

        /*
         *      We won't send an icmp error if the destination is a
         *      known anycast address.
         */
        if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
                goto out_dst_release;
        }

        /* No need to clone since we're just using its address. */
        dst2 = dst;

        err = xfrm_lookup(&dst, &fl, sk, 0);
        switch (err) {
        case 0:
                if (dst != dst2)
                        goto route_done;
                break;
        case -EPERM:
                dst = NULL;
                break;
        default:
                goto out;
        }

        if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
                goto relookup_failed;

        if (ip6_dst_lookup(sk, &dst2, &fl))
                goto relookup_failed;

        err = xfrm_lookup(&dst2, &fl, sk, XFRM_LOOKUP_ICMP);
        switch (err) {
        case 0:
                dst_release(dst);
                dst = dst2;
                break;
        case -EPERM:
                goto out_dst_release;
        default:
relookup_failed:
                if (!dst)
                        goto out;
                break;
        }

route_done:
        if (ipv6_addr_is_multicast(&fl.fl6_dst))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        tclass = np->tclass;
        if (tclass < 0)
                tclass = 0;

        msg.skb = skb;
        msg.offset = skb_network_offset(skb);
        msg.type = type;

        len = skb->len - msg.offset;
        len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
        if (len < 0) {
                LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
                goto out_dst_release;
        }

        idev = in6_dev_get(skb->dev);

        err = ip6_append_data(sk, icmpv6_getfrag, &msg,
                              len + sizeof(struct icmp6hdr),
                              sizeof(struct icmp6hdr),
                              hlimit, tclass, NULL, &fl, (struct rt6_info *)dst,
                              MSG_DONTWAIT);
        if (err) {
                ip6_flush_pending_frames(sk);
                goto out_put;
        }
        err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));

out_put:
        if (likely(idev != NULL))
                in6_dev_put(idev);
out_dst_release:
        dst_release(dst);
out:
        icmpv6_xmit_unlock(sk);
}
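
/*
 * Example of how error-generating code elsewhere in the stack might
 * call this (illustrative; the surrounding context is assumed):
 *
 *      if (pkt_len > mtu)
 *              icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
 *
 * For ICMPV6_PKT_TOOBIG "info" carries the MTU, for ICMPV6_PARAMPROB
 * it carries the offset of the offending field; it ends up in the
 * header via the htonl(info) assignment above.
 */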

EXPORT_SYMBOL(icmpv6_send);

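/*
 * Build and send an Echo Reply for a received Echo Request.  The
 * received ICMPv6 header is copied and only the type is rewritten, so
 * the identifier, sequence number and payload are echoed back
 * unchanged.
 */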
static void icmpv6_echo_reply(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        struct sock *sk;
        struct inet6_dev *idev;
        struct ipv6_pinfo *np;
        struct in6_addr *saddr = NULL;
        struct icmp6hdr *icmph = icmp6_hdr(skb);
        struct icmp6hdr tmp_hdr;
        struct flowi fl;
        struct icmpv6_msg msg;
        struct dst_entry *dst;
        int err = 0;
        int hlimit;
        int tclass;

        saddr = &ipv6_hdr(skb)->daddr;

        if (!ipv6_unicast_destination(skb))
                saddr = NULL;

        memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
        tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_ICMPV6;
        ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
        if (saddr)
                ipv6_addr_copy(&fl.fl6_src, saddr);
        fl.oif = skb->dev->ifindex;
        fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
        security_skb_classify_flow(skb, &fl);

        sk = icmpv6_sk(net);
        np = inet6_sk(sk);

        if (icmpv6_xmit_lock(sk))
                return;

        if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
                fl.oif = np->mcast_oif;

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto out;
        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                goto out;

        if (ipv6_addr_is_multicast(&fl.fl6_dst))
                hlimit = np->mcast_hops;
        else
                hlimit = np->hop_limit;
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);

        tclass = np->tclass;
        if (tclass < 0)
                tclass = 0;

        idev = in6_dev_get(skb->dev);

        msg.skb = skb;
        msg.offset = 0;
        msg.type = ICMPV6_ECHO_REPLY;

        err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
                              sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
                              (struct rt6_info *)dst, MSG_DONTWAIT);

        if (err) {
                ip6_flush_pending_frames(sk);
                goto out_put;
        }
        err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));

out_put:
        if (likely(idev != NULL))
                in6_dev_put(idev);
        dst_release(dst);
out:
        icmpv6_xmit_unlock(sk);
}

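/*
 * Hand a received ICMPv6 error to the upper-layer protocol that sent
 * the offending packet: locate the inner transport header past any
 * extension headers, invoke the registered inet6_protocol err_handler,
 * and also notify any matching raw sockets.
 */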
static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
{
        struct inet6_protocol *ipprot;
        int inner_offset;
        int hash;
        u8 nexthdr;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                return;

        nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
        if (ipv6_ext_hdr(nexthdr)) {
                /* now skip over extension headers */
                inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
                if (inner_offset < 0)
                        return;
        } else {
                inner_offset = sizeof(struct ipv6hdr);
        }

        /* Check the header, including 8 bytes of the inner protocol header. */
        if (!pskb_may_pull(skb, inner_offset + 8))
                return;

        /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
           Without this we will not be able e.g. to make source-routed
           pmtu discovery.
           Corresponding argument (opt) to notifiers is already added.
           --ANK (980726)
         */

        hash = nexthdr & (MAX_INET_PROTOS - 1);

        rcu_read_lock();
        ipprot = rcu_dereference(inet6_protos[hash]);
        if (ipprot && ipprot->err_handler)
                ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
        rcu_read_unlock();

        raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
}

/*
 *      Handle icmp messages
 */

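/*
 * Main receive path: verify the checksum (unless it was already
 * verified in hardware), bump the MIB counters and dispatch on the
 * ICMPv6 type -- echo requests are answered here, errors are passed
 * to icmpv6_notify(), and neighbour discovery / MLD messages go to
 * their own handlers.
 */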
static int icmpv6_rcv(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct in6_addr *saddr, *daddr;
        struct ipv6hdr *orig_hdr;
        struct icmp6hdr *hdr;
        int type;

        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                int nh;

                if (!(skb->sp && skb->sp->xvec[skb->sp->len - 1]->props.flags &
                                 XFRM_STATE_ICMP))
                        goto drop_no_count;

                if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
                        goto drop_no_count;

                nh = skb_network_offset(skb);
                skb_set_network_header(skb, sizeof(*hdr));

                if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
                        goto drop_no_count;

                skb_set_network_header(skb, nh);
        }

        ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INMSGS);

        saddr = &ipv6_hdr(skb)->saddr;
        daddr = &ipv6_hdr(skb)->daddr;

        /* Perform checksum. */
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
                                     skb->csum))
                        break;
                /* fall through */
        case CHECKSUM_NONE:
                skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
                                             IPPROTO_ICMPV6, 0));
                if (__skb_checksum_complete(skb)) {
                        LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
                                       NIP6(*saddr), NIP6(*daddr));
                        goto discard_it;
                }
        }

        if (!pskb_pull(skb, sizeof(*hdr)))
                goto discard_it;

        hdr = icmp6_hdr(skb);

        type = hdr->icmp6_type;

        ICMP6MSGIN_INC_STATS_BH(idev, type);

        switch (type) {
        case ICMPV6_ECHO_REQUEST:
                icmpv6_echo_reply(skb);
                break;

        case ICMPV6_ECHO_REPLY:
                /* we couldn't care less */
                break;

        case ICMPV6_PKT_TOOBIG:
                /* BUGGG_FUTURE: if packet contains rthdr, we cannot update
                   standard destination cache. Seems, only "advanced"
                   destination cache will allow us to solve this problem
                   --ANK (980726)
                 */
                if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                        goto discard_it;
                hdr = icmp6_hdr(skb);
                orig_hdr = (struct ipv6hdr *) (hdr + 1);
                rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
                                   ntohl(hdr->icmp6_mtu));

                /*
                 *      Drop through to notify
                 */

        case ICMPV6_DEST_UNREACH:
        case ICMPV6_TIME_EXCEED:
        case ICMPV6_PARAMPROB:
                icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
                break;

        case NDISC_ROUTER_SOLICITATION:
        case NDISC_ROUTER_ADVERTISEMENT:
        case NDISC_NEIGHBOUR_SOLICITATION:
        case NDISC_NEIGHBOUR_ADVERTISEMENT:
        case NDISC_REDIRECT:
                ndisc_rcv(skb);
                break;

        case ICMPV6_MGM_QUERY:
                igmp6_event_query(skb);
                break;

        case ICMPV6_MGM_REPORT:
                igmp6_event_report(skb);
                break;

        case ICMPV6_MGM_REDUCTION:
        case ICMPV6_NI_QUERY:
        case ICMPV6_NI_REPLY:
        case ICMPV6_MLD2_REPORT:
        case ICMPV6_DHAAD_REQUEST:
        case ICMPV6_DHAAD_REPLY:
        case ICMPV6_MOBILE_PREFIX_SOL:
        case ICMPV6_MOBILE_PREFIX_ADV:
                break;

        default:
                LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");

                /* informational */
                if (type & ICMPV6_INFOMSG_MASK)
                        break;

                /*
                 * An error of unknown type must be passed to
                 * the upper level.
                 */

                icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
        }

        kfree_skb(skb);
        return 0;

discard_it:
        ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS);
drop_no_count:
        kfree_skb(skb);
        return 0;
}

void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
                      u8 type,
                      const struct in6_addr *saddr,
                      const struct in6_addr *daddr,
                      int oif)
{
        memset(fl, 0, sizeof(*fl));
        ipv6_addr_copy(&fl->fl6_src, saddr);
        ipv6_addr_copy(&fl->fl6_dst, daddr);
        fl->proto = IPPROTO_ICMPV6;
        fl->fl_icmp_type = type;
        fl->fl_icmp_code = 0;
        fl->oif = oif;
        security_sk_classify_flow(sk, fl);
}

/*
 * Special lock-class for the per-net ICMPv6 control sockets:
 */
static struct lock_class_key icmpv6_socket_sk_dst_lock_key;

static int __net_init icmpv6_sk_init(struct net *net)
{
        struct sock *sk;
        int err, i, j;

        net->ipv6.icmp_sk =
                kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
        if (net->ipv6.icmp_sk == NULL)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                err = inet_ctl_sock_create(&sk, PF_INET6,
                                           SOCK_RAW, IPPROTO_ICMPV6, net);
                if (err < 0) {
                        printk(KERN_ERR
                               "Failed to initialize the ICMP6 control socket "
                               "(err %d).\n",
                               err);
                        goto fail;
                }

                net->ipv6.icmp_sk[i] = sk;

                /*
                 * Split off their lock-class, because sk->sk_dst_lock
                 * gets used from softirqs, which is safe for the per-net
                 * ICMPv6 control sockets (because those never get used
                 * directly via userspace syscalls), but unsafe for
                 * normal sockets.
                 */
                lockdep_set_class(&sk->sk_dst_lock,
                                  &icmpv6_socket_sk_dst_lock_key);

                /* Enough space for 2 64K ICMP packets, including
                 * sk_buff struct overhead.
                 */
                sk->sk_sndbuf =
                        (2 * ((64 * 1024) + sizeof(struct sk_buff)));
        }
        return 0;

fail:
        for (j = 0; j < i; j++)
                inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
        kfree(net->ipv6.icmp_sk);
        return err;
}

static void __net_exit icmpv6_sk_exit(struct net *net)
{
        int i;

        for_each_possible_cpu(i) {
                inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
        }
        kfree(net->ipv6.icmp_sk);
}

static struct pernet_operations icmpv6_sk_ops = {
        .init = icmpv6_sk_init,
        .exit = icmpv6_sk_exit,
};

int __init icmpv6_init(void)
{
        int err;

        err = register_pernet_subsys(&icmpv6_sk_ops);
        if (err < 0)
                return err;

        err = -EAGAIN;
        if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
                goto fail;
        return 0;

fail:
        printk(KERN_ERR "Failed to register ICMP6 protocol\n");
        unregister_pernet_subsys(&icmpv6_sk_ops);
        return err;
}

void icmpv6_cleanup(void)
{
        unregister_pernet_subsys(&icmpv6_sk_ops);
        inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}

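/*
 * Map ICMPv6 "destination unreachable" codes to errno values and to
 * whether the error is fatal for the connection; used by
 * icmpv6_err_convert() below and indexed by the ICMPv6 code up to
 * ICMPV6_PORT_UNREACH.
 */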
static const struct icmp6_err {
        int err;
        int fatal;
} tab_unreach[] = {
        {       /* NOROUTE */
                .err    = ENETUNREACH,
                .fatal  = 0,
        },
        {       /* ADM_PROHIBITED */
                .err    = EACCES,
                .fatal  = 1,
        },
        {       /* Was NOT_NEIGHBOUR, now reserved */
                .err    = EHOSTUNREACH,
                .fatal  = 0,
        },
        {       /* ADDR_UNREACH */
                .err    = EHOSTUNREACH,
                .fatal  = 0,
        },
        {       /* PORT_UNREACH */
                .err    = ECONNREFUSED,
                .fatal  = 1,
        },
};

int icmpv6_err_convert(int type, int code, int *err)
{
        int fatal = 0;

        *err = EPROTO;

        switch (type) {
        case ICMPV6_DEST_UNREACH:
                fatal = 1;
                if (code <= ICMPV6_PORT_UNREACH) {
                        *err = tab_unreach[code].err;
                        fatal = tab_unreach[code].fatal;
                }
                break;

        case ICMPV6_PKT_TOOBIG:
                *err = EMSGSIZE;
                break;

        case ICMPV6_PARAMPROB:
                *err = EPROTO;
                fatal = 1;
                break;

        case ICMPV6_TIME_EXCEED:
                *err = EHOSTUNREACH;
                break;
        }

        return fatal;
}

EXPORT_SYMBOL(icmpv6_err_convert);

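/*
 * Illustrative caller pattern, roughly as transport protocol error
 * handlers use it (variable names here are assumptions):
 *
 *      int err;
 *      int fatal = icmpv6_err_convert(type, code, &err);
 *
 *      sk->sk_err = err;
 *      if (fatal)
 *              ... abort the connection ...
 */
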
#ifdef CONFIG_SYSCTL
ctl_table ipv6_icmp_table_template[] = {
        {
                .ctl_name       = NET_IPV6_ICMP_RATELIMIT,
                .procname       = "ratelimit",
                .data           = &init_net.ipv6.sysctl.icmpv6_time,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_ms_jiffies,
                .strategy       = &sysctl_ms_jiffies
        },
        { .ctl_name = 0 },
};

struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
{
        struct ctl_table *table;

        table = kmemdup(ipv6_icmp_table_template,
                        sizeof(ipv6_icmp_table_template),
                        GFP_KERNEL);

        if (table)
                table[0].data = &net->ipv6.sysctl.icmpv6_time;

        return table;
}
#endif