/*
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
/* Forward declaration: deliver_clone() is defined later in this file but
 * is needed by br_forward() and br_flood() above its definition.
 * NOTE(review): the middle "struct sk_buff *skb" parameter line was lost
 * in the scrape; it is reconstructed from the three-argument call sites.
 */
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));
29 /* Don't forward packets to originating port or forwarding disabled */
30 static inline int should_deliver(const struct net_bridge_port
*p
,
31 const struct sk_buff
*skb
)
33 return ((p
->flags
& BR_HAIRPIN_MODE
) || skb
->dev
!= p
->dev
) &&
34 br_allowed_egress(p
->br
, nbp_get_vlan_info(p
), skb
) &&
35 p
->state
== BR_STATE_FORWARDING
;
38 int br_dev_queue_push_xmit(struct sock
*sk
, struct sk_buff
*skb
)
40 if (!is_skb_forwardable(skb
->dev
, skb
)) {
43 skb_push(skb
, ETH_HLEN
);
44 br_drop_fake_rtable(skb
);
45 skb_sender_cpu_clear(skb
);
51 EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit
);
53 int br_forward_finish(struct sock
*sk
, struct sk_buff
*skb
)
55 return NF_HOOK(NFPROTO_BRIDGE
, NF_BR_POST_ROUTING
, sk
, skb
,
57 br_dev_queue_push_xmit
);
60 EXPORT_SYMBOL_GPL(br_forward_finish
);
62 static void __br_deliver(const struct net_bridge_port
*to
, struct sk_buff
*skb
)
64 skb
= br_handle_vlan(to
->br
, nbp_get_vlan_info(to
), skb
);
70 if (unlikely(netpoll_tx_running(to
->br
->dev
))) {
71 if (!is_skb_forwardable(skb
->dev
, skb
))
74 skb_push(skb
, ETH_HLEN
);
75 br_netpoll_send_skb(to
, skb
);
80 NF_HOOK(NFPROTO_BRIDGE
, NF_BR_LOCAL_OUT
, NULL
, skb
,
85 static void __br_forward(const struct net_bridge_port
*to
, struct sk_buff
*skb
)
87 struct net_device
*indev
;
89 if (skb_warn_if_lro(skb
)) {
94 skb
= br_handle_vlan(to
->br
, nbp_get_vlan_info(to
), skb
);
100 skb_forward_csum(skb
);
102 NF_HOOK(NFPROTO_BRIDGE
, NF_BR_FORWARD
, NULL
, skb
,
/* called with rcu_read_lock */
/* Deliver a locally-originated skb to port @to, or free it when delivery
 * is not allowed (no port, originating port, VLAN filtered, not
 * forwarding).  Consumes the skb in all cases.
 */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_deliver);
119 /* called with rcu_read_lock */
120 void br_forward(const struct net_bridge_port
*to
, struct sk_buff
*skb
, struct sk_buff
*skb0
)
122 if (should_deliver(to
, skb
)) {
124 deliver_clone(to
, skb
, __br_forward
);
126 __br_forward(to
, skb
);
134 static int deliver_clone(const struct net_bridge_port
*prev
,
136 void (*__packet_hook
)(const struct net_bridge_port
*p
,
137 struct sk_buff
*skb
))
139 struct net_device
*dev
= BR_INPUT_SKB_CB(skb
)->brdev
;
141 skb
= skb_clone(skb
, GFP_ATOMIC
);
143 dev
->stats
.tx_dropped
++;
147 __packet_hook(prev
, skb
);
/* Flooding helper: lazily deliver to the previously remembered port.
 * If @p should receive the skb, deliver a clone to @prev (if any) and
 * return @p as the new "pending" port; the last pending port receives
 * the original skb without cloning.  Returns @prev unchanged when @p
 * should not receive the skb, or ERR_PTR() on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}
173 /* called under bridge lock */
174 static void br_flood(struct net_bridge
*br
, struct sk_buff
*skb
,
175 struct sk_buff
*skb0
,
176 void (*__packet_hook
)(const struct net_bridge_port
*p
,
177 struct sk_buff
*skb
),
180 struct net_bridge_port
*p
;
181 struct net_bridge_port
*prev
;
185 list_for_each_entry_rcu(p
, &br
->port_list
, list
) {
186 /* Do not flood unicast traffic to ports that turn it off */
187 if (unicast
&& !(p
->flags
& BR_FLOOD
))
190 /* Do not flood to ports that enable proxy ARP */
191 if (p
->flags
& BR_PROXYARP
)
193 if ((p
->flags
& BR_PROXYARP_WIFI
) &&
194 BR_INPUT_SKB_CB(skb
)->proxyarp_replied
)
197 prev
= maybe_deliver(prev
, p
, skb
, __packet_hook
);
206 deliver_clone(prev
, skb
, __packet_hook
);
208 __packet_hook(prev
, skb
);
217 /* called with rcu_read_lock */
218 void br_flood_deliver(struct net_bridge
*br
, struct sk_buff
*skb
, bool unicast
)
220 br_flood(br
, skb
, NULL
, __br_deliver
, unicast
);
223 /* called under bridge lock */
224 void br_flood_forward(struct net_bridge
*br
, struct sk_buff
*skb
,
225 struct sk_buff
*skb2
, bool unicast
)
227 br_flood(br
, skb
, skb2
, __br_forward
, unicast
);
230 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
231 /* called with rcu_read_lock */
232 static void br_multicast_flood(struct net_bridge_mdb_entry
*mdst
,
233 struct sk_buff
*skb
, struct sk_buff
*skb0
,
234 void (*__packet_hook
)(
235 const struct net_bridge_port
*p
,
236 struct sk_buff
*skb
))
238 struct net_device
*dev
= BR_INPUT_SKB_CB(skb
)->brdev
;
239 struct net_bridge
*br
= netdev_priv(dev
);
240 struct net_bridge_port
*prev
= NULL
;
241 struct net_bridge_port_group
*p
;
242 struct hlist_node
*rp
;
244 rp
= rcu_dereference(hlist_first_rcu(&br
->router_list
));
245 p
= mdst
? rcu_dereference(mdst
->ports
) : NULL
;
247 struct net_bridge_port
*port
, *lport
, *rport
;
249 lport
= p
? p
->port
: NULL
;
250 rport
= rp
? hlist_entry(rp
, struct net_bridge_port
, rlist
) :
253 port
= (unsigned long)lport
> (unsigned long)rport
?
256 prev
= maybe_deliver(prev
, port
, skb
, __packet_hook
);
260 if ((unsigned long)lport
>= (unsigned long)port
)
261 p
= rcu_dereference(p
->next
);
262 if ((unsigned long)rport
>= (unsigned long)port
)
263 rp
= rcu_dereference(hlist_next_rcu(rp
));
270 deliver_clone(prev
, skb
, __packet_hook
);
272 __packet_hook(prev
, skb
);
280 /* called with rcu_read_lock */
281 void br_multicast_deliver(struct net_bridge_mdb_entry
*mdst
,
284 br_multicast_flood(mdst
, skb
, NULL
, __br_deliver
);
287 /* called with rcu_read_lock */
288 void br_multicast_forward(struct net_bridge_mdb_entry
*mdst
,
289 struct sk_buff
*skb
, struct sk_buff
*skb2
)
291 br_multicast_flood(mdst
, skb
, skb2
, __br_forward
);