1 #include <linux/skbuff.h>
2 #include <linux/netdevice.h>
3 #include <linux/if_vlan.h>
4 #include <linux/netpoll.h>
5 #include <linux/export.h>
8 bool vlan_do_receive(struct sk_buff
**skbp
, bool last_handler
)
10 struct sk_buff
*skb
= *skbp
;
11 u16 vlan_id
= skb
->vlan_tci
& VLAN_VID_MASK
;
12 struct net_device
*vlan_dev
;
13 struct vlan_pcpu_stats
*rx_stats
;
15 vlan_dev
= vlan_find_dev(skb
->dev
, vlan_id
);
17 /* Only the last call to vlan_do_receive() should change
18 * pkt_type to PACKET_OTHERHOST
20 if (vlan_id
&& last_handler
)
21 skb
->pkt_type
= PACKET_OTHERHOST
;
25 skb
= *skbp
= skb_share_check(skb
, GFP_ATOMIC
);
30 if (skb
->pkt_type
== PACKET_OTHERHOST
) {
31 /* Our lower layer thinks this is not local, let's make sure.
32 * This allows the VLAN to have a different MAC than the
33 * underlying device, and still route correctly. */
34 if (ether_addr_equal(eth_hdr(skb
)->h_dest
, vlan_dev
->dev_addr
))
35 skb
->pkt_type
= PACKET_HOST
;
38 if (!(vlan_dev_priv(vlan_dev
)->flags
& VLAN_FLAG_REORDER_HDR
)) {
39 unsigned int offset
= skb
->data
- skb_mac_header(skb
);
42 * vlan_insert_tag expect skb->data pointing to mac header.
43 * So change skb->data before calling it and change back to
44 * original position later
46 skb_push(skb
, offset
);
47 skb
= *skbp
= vlan_insert_tag(skb
, skb
->vlan_tci
);
50 skb_pull(skb
, offset
+ VLAN_HLEN
);
51 skb_reset_mac_len(skb
);
54 skb
->priority
= vlan_get_ingress_priority(vlan_dev
, skb
->vlan_tci
);
57 rx_stats
= this_cpu_ptr(vlan_dev_priv(vlan_dev
)->vlan_pcpu_stats
);
59 u64_stats_update_begin(&rx_stats
->syncp
);
60 rx_stats
->rx_packets
++;
61 rx_stats
->rx_bytes
+= skb
->len
;
62 if (skb
->pkt_type
== PACKET_MULTICAST
)
63 rx_stats
->rx_multicast
++;
64 u64_stats_update_end(&rx_stats
->syncp
);
69 /* Must be invoked with rcu_read_lock or with RTNL. */
70 struct net_device
*__vlan_find_dev_deep(struct net_device
*real_dev
,
73 struct vlan_info
*vlan_info
= rcu_dereference_rtnl(real_dev
->vlan_info
);
76 return vlan_group_get_device(&vlan_info
->grp
, vlan_id
);
79 * Bonding slaves do not have grp assigned to themselves.
80 * Grp is assigned to bonding master instead.
82 if (netif_is_bond_slave(real_dev
))
83 return __vlan_find_dev_deep(real_dev
->master
, vlan_id
);
88 EXPORT_SYMBOL(__vlan_find_dev_deep
);
90 struct net_device
*vlan_dev_real_dev(const struct net_device
*dev
)
92 return vlan_dev_priv(dev
)->real_dev
;
94 EXPORT_SYMBOL(vlan_dev_real_dev
);
96 u16
vlan_dev_vlan_id(const struct net_device
*dev
)
98 return vlan_dev_priv(dev
)->vlan_id
;
100 EXPORT_SYMBOL(vlan_dev_vlan_id
);
102 static struct sk_buff
*vlan_reorder_header(struct sk_buff
*skb
)
104 if (skb_cow(skb
, skb_headroom(skb
)) < 0)
106 memmove(skb
->data
- ETH_HLEN
, skb
->data
- VLAN_ETH_HLEN
, 2 * ETH_ALEN
);
107 skb
->mac_header
+= VLAN_HLEN
;
108 skb_reset_mac_len(skb
);
112 struct sk_buff
*vlan_untag(struct sk_buff
*skb
)
114 struct vlan_hdr
*vhdr
;
117 if (unlikely(vlan_tx_tag_present(skb
))) {
118 /* vlan_tci is already set-up so leave this for another time */
122 skb
= skb_share_check(skb
, GFP_ATOMIC
);
126 if (unlikely(!pskb_may_pull(skb
, VLAN_HLEN
)))
129 vhdr
= (struct vlan_hdr
*) skb
->data
;
130 vlan_tci
= ntohs(vhdr
->h_vlan_TCI
);
131 __vlan_hwaccel_put_tag(skb
, vlan_tci
);
133 skb_pull_rcsum(skb
, VLAN_HLEN
);
134 vlan_set_encap_proto(skb
, vhdr
);
136 skb
= vlan_reorder_header(skb
);
140 skb_reset_network_header(skb
);
141 skb_reset_transport_header(skb
);
/* vlan info and vid list */
154 static void vlan_group_free(struct vlan_group
*grp
)
158 for (i
= 0; i
< VLAN_GROUP_ARRAY_SPLIT_PARTS
; i
++)
159 kfree(grp
->vlan_devices_arrays
[i
]);
162 static void vlan_info_free(struct vlan_info
*vlan_info
)
164 vlan_group_free(&vlan_info
->grp
);
168 static void vlan_info_rcu_free(struct rcu_head
*rcu
)
170 vlan_info_free(container_of(rcu
, struct vlan_info
, rcu
));
173 static struct vlan_info
*vlan_info_alloc(struct net_device
*dev
)
175 struct vlan_info
*vlan_info
;
177 vlan_info
= kzalloc(sizeof(struct vlan_info
), GFP_KERNEL
);
181 vlan_info
->real_dev
= dev
;
182 INIT_LIST_HEAD(&vlan_info
->vid_list
);
186 struct vlan_vid_info
{
187 struct list_head list
;
192 static struct vlan_vid_info
*vlan_vid_info_get(struct vlan_info
*vlan_info
,
195 struct vlan_vid_info
*vid_info
;
197 list_for_each_entry(vid_info
, &vlan_info
->vid_list
, list
) {
198 if (vid_info
->vid
== vid
)
204 static struct vlan_vid_info
*vlan_vid_info_alloc(unsigned short vid
)
206 struct vlan_vid_info
*vid_info
;
208 vid_info
= kzalloc(sizeof(struct vlan_vid_info
), GFP_KERNEL
);
216 static int __vlan_vid_add(struct vlan_info
*vlan_info
, unsigned short vid
,
217 struct vlan_vid_info
**pvid_info
)
219 struct net_device
*dev
= vlan_info
->real_dev
;
220 const struct net_device_ops
*ops
= dev
->netdev_ops
;
221 struct vlan_vid_info
*vid_info
;
224 vid_info
= vlan_vid_info_alloc(vid
);
228 if ((dev
->features
& NETIF_F_HW_VLAN_FILTER
) &&
229 ops
->ndo_vlan_rx_add_vid
) {
230 err
= ops
->ndo_vlan_rx_add_vid(dev
, vid
);
236 list_add(&vid_info
->list
, &vlan_info
->vid_list
);
237 vlan_info
->nr_vids
++;
238 *pvid_info
= vid_info
;
242 int vlan_vid_add(struct net_device
*dev
, unsigned short vid
)
244 struct vlan_info
*vlan_info
;
245 struct vlan_vid_info
*vid_info
;
246 bool vlan_info_created
= false;
251 vlan_info
= rtnl_dereference(dev
->vlan_info
);
253 vlan_info
= vlan_info_alloc(dev
);
256 vlan_info_created
= true;
258 vid_info
= vlan_vid_info_get(vlan_info
, vid
);
260 err
= __vlan_vid_add(vlan_info
, vid
, &vid_info
);
262 goto out_free_vlan_info
;
264 vid_info
->refcount
++;
266 if (vlan_info_created
)
267 rcu_assign_pointer(dev
->vlan_info
, vlan_info
);
272 if (vlan_info_created
)
276 EXPORT_SYMBOL(vlan_vid_add
);
278 static void __vlan_vid_del(struct vlan_info
*vlan_info
,
279 struct vlan_vid_info
*vid_info
)
281 struct net_device
*dev
= vlan_info
->real_dev
;
282 const struct net_device_ops
*ops
= dev
->netdev_ops
;
283 unsigned short vid
= vid_info
->vid
;
286 if ((dev
->features
& NETIF_F_HW_VLAN_FILTER
) &&
287 ops
->ndo_vlan_rx_kill_vid
) {
288 err
= ops
->ndo_vlan_rx_kill_vid(dev
, vid
);
290 pr_warn("failed to kill vid %d for device %s\n",
294 list_del(&vid_info
->list
);
296 vlan_info
->nr_vids
--;
299 void vlan_vid_del(struct net_device
*dev
, unsigned short vid
)
301 struct vlan_info
*vlan_info
;
302 struct vlan_vid_info
*vid_info
;
306 vlan_info
= rtnl_dereference(dev
->vlan_info
);
310 vid_info
= vlan_vid_info_get(vlan_info
, vid
);
313 vid_info
->refcount
--;
314 if (vid_info
->refcount
== 0) {
315 __vlan_vid_del(vlan_info
, vid_info
);
316 if (vlan_info
->nr_vids
== 0) {
317 RCU_INIT_POINTER(dev
->vlan_info
, NULL
);
318 call_rcu(&vlan_info
->rcu
, vlan_info_rcu_free
);
322 EXPORT_SYMBOL(vlan_vid_del
);
324 int vlan_vids_add_by_dev(struct net_device
*dev
,
325 const struct net_device
*by_dev
)
327 struct vlan_vid_info
*vid_info
;
328 struct vlan_info
*vlan_info
;
333 vlan_info
= rtnl_dereference(by_dev
->vlan_info
);
337 list_for_each_entry(vid_info
, &vlan_info
->vid_list
, list
) {
338 err
= vlan_vid_add(dev
, vid_info
->vid
);
345 list_for_each_entry_continue_reverse(vid_info
,
346 &vlan_info
->vid_list
,
348 vlan_vid_del(dev
, vid_info
->vid
);
353 EXPORT_SYMBOL(vlan_vids_add_by_dev
);
355 void vlan_vids_del_by_dev(struct net_device
*dev
,
356 const struct net_device
*by_dev
)
358 struct vlan_vid_info
*vid_info
;
359 struct vlan_info
*vlan_info
;
363 vlan_info
= rtnl_dereference(by_dev
->vlan_info
);
367 list_for_each_entry(vid_info
, &vlan_info
->vid_list
, list
)
368 vlan_vid_del(dev
, vid_info
->vid
);
370 EXPORT_SYMBOL(vlan_vids_del_by_dev
);