/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

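/* PRE_ROUTING stashes the original IPv4 destination address in the
 * nf_bridge control block (store_orig_dstaddr); dnat_took_place() later
 * compares it against the current header to detect that an iptables DNAT
 * rule rewrote the destination while the packet was up in the IP hooks.
 */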
#define skb_origaddr(skb)	(((struct bridge_skb_cb *) \
				 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)	(skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)	(skb_origaddr(skb) != ip_hdr(skb)->daddr)

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
static int brnf_pass_vlan_indev __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#define brnf_pass_vlan_indev 0
#endif
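
/* With CONFIG_SYSCTL the knobs above are exposed under /proc/sys/net/bridge/
 * (see brnf_table below), e.g. "echo 0 > /proc/sys/net/bridge/bridge-nf-call-iptables"
 * keeps bridged IPv4 traffic out of iptables unless a bridge sets its own
 * nf_call_iptables flag. Without CONFIG_SYSCTL the bridge-nf-call switches
 * are effectively always on and the filter/pass options always off.
 */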

#define IS_IP(skb) \
	(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (vlan_tx_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)

static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

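/* Map a bridge port device to per-bridge state: bridge_parent_rtable()
 * returns the bridge's fake_rtable (a dummy dst used while the packet is
 * still being bridged) and bridge_parent() returns the bridge master device.
 * Both return NULL if @dev is not currently a bridge port.
 */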
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}

static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}
	return nf_bridge;
}

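/* Push or pull the VLAN/PPPoE encapsulation (if any) that sits in front of
 * the IP header, so the IPv4/IPv6 netfilter hooks always see a bare L3
 * packet while the bridge code keeps seeing the frame as it arrived.
 */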
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

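/* Copy the original link-layer header (Ethernet plus any VLAN/PPPoE
 * encapsulation) that precedes the network header into nf_bridge->data,
 * so it can be restored onto the skb before the frame is finally sent.
 */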
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */

static int br_parse_ip_options(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		int ret;

		if (neigh->hh.hh_len) {
			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = nf_bridge->physindev;
			ret = br_handle_frame_finish(skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 skb->nf_bridge->data,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->mask |= BRNF_BRIDGED_DNAT;
			/* FIXME Need to refragment */
			ret = neigh->output(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;
	int frag_max_size;

	frag_max_size = IPCB(skb)->frag_max_size;
	BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

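/* Pick the "logical" input device handed to the IP hooks: normally the
 * bridge itself, but when bridge-nf-pass-vlan-input-dev is enabled and the
 * frame carries a VLAN tag, the matching VLAN device on top of the bridge
 * (if one exists).
 */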
static struct net_device *brnf_get_logical_dev(struct sk_buff *skb, const struct net_device *dev)
{
	struct net_device *vlan, *br;

	br = bridge_parent(dev);
	if (brnf_pass_vlan_indev == 0 || !vlan_tx_tag_present(skb))
		return br;

	vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
					vlan_tx_tag_get(skb) & VLAN_VID_MASK);

	return vlan ? vlan : br;
}

/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = brnf_get_logical_dev(skb, skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}

/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;

}

/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular. Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}


/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one. On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	br_drop_fake_rtable(skb);
	return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
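/* Completion handler for the NF_INET_FORWARD/NF_ARP_FORWARD hooks below:
 * restore pkt_type, protocol and the encapsulation header, then re-enter
 * the bridge FORWARD chain (the threshold of 1 skips this module's own
 * bridge-level hooks) so br_forward_finish() eventually runs.
 */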
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *in;

	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}


/* This is the 'purely bridged' case. For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
				     struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
		return NF_DROP;

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, brnf_get_logical_dev(skb, in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}

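/* When IPv4 defragmentation support is built in, conntrack may have
 * reassembled fragments on the way in; a reassembled packet can exceed the
 * outgoing MTU, so it is (crudely, see the comment below) re-fragmented
 * before transmission. Without defrag support no reassembly happened and
 * the frame can be queued directly.
 */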
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;
	int frag_max_size;

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		IPCB(skb)->frag_max_size = frag_max_size;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif

/* PF_BRIDGE/POST_ROUTING ********************************************/
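/* Run the IPv4/IPv6 POST_ROUTING hooks (and therefore SNAT/masquerading)
 * for traffic that is being bridged, with the bridge device as the output
 * device; the physdev match can still see the real outgoing port saved in
 * nf_bridge->physoutdev.
 */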
static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
				       struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	nf_bridge_save_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}

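/* Intentionally empty. The exported symbol gives other modules something to
 * call (and thus a module dependency) when they need this code loaded;
 * presumably that is its only purpose, since calling it has no side effects
 * here.
 */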
void br_netfilter_enable(void)
{
}
EXPORT_SYMBOL_GPL(br_netfilter_enable);

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};

#ifdef CONFIG_SYSCTL
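/* proc handler shared by all the entries below: after the usual integer
 * parse, any non-zero value written is normalised to 1 so the knobs behave
 * as plain booleans.
 */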
static
int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
			    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static struct ctl_table brnf_table[] = {
	{
		.procname = "bridge-nf-call-arptables",
		.data = &brnf_call_arptables,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-call-iptables",
		.data = &brnf_call_iptables,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-call-ip6tables",
		.data = &brnf_call_ip6tables,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-filter-vlan-tagged",
		.data = &brnf_filter_vlan_tagged,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-filter-pppoe-tagged",
		.data = &brnf_filter_pppoe_tagged,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{
		.procname = "bridge-nf-pass-vlan-input-dev",
		.data = &brnf_pass_vlan_indev,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = brnf_sysctl_call_tables,
	},
	{ }
};
#endif

static int __init br_netfilter_init(void)
{
	int ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0)
		return ret;

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		ret = -ENOMEM;
		goto err1;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
err1:
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	return ret;
}

static void __exit br_netfilter_fini(void)
{
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_net_sysctl_table(brnf_sysctl_header);
#endif
}

module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");