/* net/ipv6/ip6_input.c */
1 /*
2 * IPv6 input
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Ian P. Morris <I.P.Morris@soton.ac.uk>
8 *
9 * $Id: ip6_input.c,v 1.19 2000/12/13 18:31:50 davem Exp $
10 *
11 * Based in linux/net/ipv4/ip_input.c
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18 /* Changes
19 *
20 * Mitsuru KANDA @USAGI and
21 * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
22 */
23
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/socket.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/netdevice.h>
30 #include <linux/in6.h>
31 #include <linux/icmpv6.h>
32 #include <linux/mroute6.h>
33
34 #include <linux/netfilter.h>
35 #include <linux/netfilter_ipv6.h>
36
37 #include <net/sock.h>
38 #include <net/snmp.h>
39
40 #include <net/ipv6.h>
41 #include <net/protocol.h>
42 #include <net/transp_v6.h>
43 #include <net/rawv6.h>
44 #include <net/ndisc.h>
45 #include <net/ip6_route.h>
46 #include <net/addrconf.h>
47 #include <net/xfrm.h>
48
49
50
51 inline int ip6_rcv_finish( struct sk_buff *skb)
52 {
53 if (skb->dst == NULL)
54 ip6_route_input(skb);
55
56 return dst_input(skb);
57 }
58
/*
 * Main IPv6 receive routine, registered as the PF_INET6 packet_type
 * handler.  Validates the fixed IPv6 header, trims link-layer padding,
 * parses a leading hop-by-hop options header if present, then hands
 * the packet to the netfilter PRE_ROUTING hook; processing continues
 * in ip6_rcv_finish().  Always returns 0 (the skb is consumed on
 * every path).
 */
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	/* Packets destined for another host were seen only because the
	 * interface is promiscuous; drop them without counting an error. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		kfree_skb(skb);
		return 0;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INRECEIVES);

	/* Unshare the skb so we can modify the header in place. */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
		rcu_read_unlock();
		goto out;
	}

	/* Start with a clean IPv6 control block for this packet. */
	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb->dst ? ip6_dst_idev(skb->dst)->dev->ifindex : dev->ifindex;

	/* Make sure the whole fixed header is in the linear data area. */
	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		goto err;

	/*
	 * RFC4291 2.5.3
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if (!(dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_loopback(&hdr->daddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			goto drop;
		}
		/* Trim link-layer padding beyond the declared payload,
		 * keeping the checksum state consistent. */
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
			goto drop;
		}
		/* Re-read: the trim may have relocated the header. */
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		/* NOTE(review): no kfree_skb on this error path —
		 * ipv6_parse_hopopts() appears to consume the skb on
		 * failure; confirm against its definition. */
		if (ipv6_parse_hopopts(skb) < 0) {
			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return 0;
		}
	}

	rcu_read_unlock();

	/* Run the PRE_ROUTING hook; ip6_rcv_finish() routes and
	 * dispatches the packet if netfilter accepts it. */
	return NF_HOOK(PF_INET6, NF_INET_PRE_ROUTING, skb, dev, NULL,
		       ip6_rcv_finish);
err:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INHDRERRORS);
drop:
	rcu_read_unlock();
	kfree_skb(skb);
out:
	return 0;
}
152
153 /*
154 * Deliver the packet to the host
155 */
156
157
/*
 * Deliver a locally-addressed packet to the matching inet6 protocol
 * handler (and any interested raw sockets), iterating as handlers
 * consume extension headers and request resubmission.
 */
static int ip6_input_finish(struct sk_buff *skb)
{
	struct inet6_protocol *ipprot;
	unsigned int nhoff;
	int nexthdr, raw;
	u8 hash;
	struct inet6_dev *idev;

	/*
	 * Parse extension headers
	 */

	rcu_read_lock();
resubmit:
	idev = ip6_dst_idev(skb->dst);
	/* Advance skb->data to the current (transport or extension)
	 * header before handing the packet on. */
	if (!pskb_pull(skb, skb_transport_offset(skb)))
		goto discard;
	/* nhoff locates the "next header" byte set by the previous
	 * parsing stage (see IP6CB(skb)->nhoff). */
	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

	/* Let matching raw sockets see the packet first. */
	raw = raw6_local_deliver(skb, nexthdr);

	hash = nexthdr & (MAX_INET_PROTOS - 1);
	if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
		int ret;

		if (ipprot->flags & INET6_PROTO_FINAL) {
			struct ipv6hdr *hdr;

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset(skb);

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);
			/* Drop multicast packets for groups this device is
			 * not a member of, unless they are MLD messages. */
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
			    &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr))
				goto discard;
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		/* A positive return from the handler loops back to
		 * resubmit the (now shorter) packet; zero counts a
		 * successful local delivery. */
		ret = ipprot->handler(skb);
		if (ret > 0)
			goto resubmit;
		else if (ret == 0)
			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			/* No protocol handler and no raw socket took it:
			 * tell the sender the next header is unknown. */
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				IP6_INC_STATS_BH(idev, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff,
					    skb->dev);
			}
		} else
			IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDELIVERS);
		kfree_skb(skb);
	}
	rcu_read_unlock();
	return 0;

discard:
	IP6_INC_STATS_BH(idev, IPSTATS_MIB_INDISCARDS);
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}
231
232
/*
 * Local delivery entry point: run the LOCAL_IN netfilter hook, then
 * continue in ip6_input_finish() if the packet is accepted.
 */
int ip6_input(struct sk_buff *skb)
{
	return NF_HOOK(PF_INET6, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
		       ip6_input_finish);
}
238
/*
 * Receive routine for multicast packets: deliver locally when this
 * host belongs to the destination group and, when IPv6 multicast
 * routing is compiled in and enabled, also hand a copy to the
 * multicast forwarding engine (ip6_mr_input).
 */
int ip6_mc_input(struct sk_buff *skb)
{
	struct ipv6hdr *hdr;
	int deliver;

	IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);

	hdr = ipv6_hdr(skb);
	/* Local delivery only if we joined the destination group on
	 * the receiving device. */
	deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);

#ifdef CONFIG_IPV6_MROUTE
	/*
	 * IPv6 multicast router mode is now supported ;)
	 */
	if (ipv6_devconf.mc_forwarding &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->ra)) {
			/* Check if this is a mld message */
			u8 *ptr = skb_network_header(skb) + opt->ra;
			struct icmp6hdr *icmp6;
			u8 nexthdr = hdr->nexthdr;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if ((ptr[2] | ptr[3]) == 0) {
				/* Assume no local delivery unless the packet
				 * turns out to be a recognized MLD message. */
				deliver = 0;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr);
				if (offset < 0)
					goto out;

				if (nexthdr != IPPROTO_ICMPV6)
					goto out;

				/* Linearize enough to read the ICMPv6 type. */
				if (!pskb_may_pull(skb, (skb_network_header(skb) +
						   offset + 1 - skb->data)))
					goto out;

				icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

				switch (icmp6->icmp6_type) {
				case ICMPV6_MGM_QUERY:
				case ICMPV6_MGM_REPORT:
				case ICMPV6_MGM_REDUCTION:
				case ICMPV6_MLD2_REPORT:
					deliver = 1;
					break;
				}
				/* MLD messages are never forwarded. */
				goto out;
			}
			/* unknown RA - process it normally */
		}

		/* Forwarding path: clone when we also deliver locally,
		 * otherwise hand the original skb to the forwarder and
		 * clear our reference. */
		if (deliver)
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2) {
			skb2->dev = skb2->dst->dev;
			ip6_mr_input(skb2);
		}
	}
out:
#endif
	if (likely(deliver))
		ip6_input(skb);
	else {
		/* discard (kfree_skb is a no-op when skb was handed
		 * off to the forwarder above and set to NULL) */
		kfree_skb(skb);
	}

	return 0;
}