net: vlan: make non-hw-accel rx path similar to hw-accel
net/8021q/vlan_core.c
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;

        vlan_dev = vlan_find_dev(skb->dev, vlan_id);
        if (!vlan_dev) {
                if (vlan_id)
                        skb->pkt_type = PACKET_OTHERHOST;
                return false;
        }

        skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return false;

        skb->dev = vlan_dev;
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;

        rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;

        switch (skb->pkt_type) {
        case PACKET_BROADCAST:
                break;
        case PACKET_MULTICAST:
                rx_stats->rx_multicast++;
                break;
        case PACKET_OTHERHOST:
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (!compare_ether_addr(eth_hdr(skb)->h_dest,
                                        vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
                break;
        }
        u64_stats_update_end(&rx_stats->syncp);

        return true;
}

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
        return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
        return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
                      u16 vlan_tci, int polling)
{
        __vlan_hwaccel_put_tag(skb, vlan_tci);
        return polling ? netif_receive_skb(skb) : netif_rx(skb);
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
                              unsigned int vlan_tci, struct sk_buff *skb)
{
        __vlan_hwaccel_put_tag(skb, vlan_tci);
        return napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(vlan_gro_receive);

gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
                            unsigned int vlan_tci)
{
        __vlan_hwaccel_put_tag(napi->skb, vlan_tci);
        return napi_gro_frags(napi);
}
EXPORT_SYMBOL(vlan_gro_frags);
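
A hardware-accelerated driver is expected to hand frames whose 802.1Q tag the NIC has already stripped to one of the helpers above; the tag is only recorded in skb->vlan_tci here, and the lookup of the VLAN device happens later in vlan_do_receive(). The fragment below is a driver-side sketch for illustration only, not part of vlan_core.c: my_adapter, my_rx and the vlan_stripped flag are made-up names standing in for whatever the real hardware descriptor provides.

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

struct my_adapter {                     /* hypothetical driver state */
        struct net_device *netdev;
        struct vlan_group *vlgrp;       /* registered via ndo_vlan_rx_register() */
        struct napi_struct napi;
};

/* Hand one received frame to the stack, using the GRO helper above when
 * the NIC reported a stripped 802.1Q tag for this frame. */
static void my_rx(struct my_adapter *adapter, struct sk_buff *skb,
                  u16 vlan_tci, bool vlan_stripped)
{
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        if (vlan_stripped)
                vlan_gro_receive(&adapter->napi, adapter->vlgrp,
                                 vlan_tci, skb);
        else
                napi_gro_receive(&adapter->napi, skb);
}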

static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
        if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
                if (skb_cow(skb, skb_headroom(skb)) < 0)
                        skb = NULL;
                if (skb) {
                        /* Lifted from Gleb's VLAN code... */
                        memmove(skb->data - ETH_HLEN,
                                skb->data - VLAN_ETH_HLEN, 12);
                        skb->mac_header += VLAN_HLEN;
                }
        }
        return skb;
}

static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
        __be16 proto;
        unsigned char *rawp;

        /*
         * Was a VLAN packet, grab the encapsulated protocol, which the layer
         * three protocols care about.
         */

        proto = vhdr->h_vlan_encapsulated_proto;
        if (ntohs(proto) >= 1536) {
                skb->protocol = proto;
                return;
        }

        rawp = skb->data;
        if (*(unsigned short *) rawp == 0xFFFF)
                /*
                 * This is a magic hack to spot IPX packets. Older Novell
                 * breaks the protocol design and runs IPX over 802.3 without
                 * an 802.2 LLC layer. We look for FFFF which isn't a used
                 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
                 * but does for the rest.
                 */
                skb->protocol = htons(ETH_P_802_3);
        else
                /*
                 * Real 802.2 LLC
                 */
                skb->protocol = htons(ETH_P_802_2);
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
        struct vlan_hdr *vhdr;
        u16 vlan_tci;

        if (unlikely(vlan_tx_tag_present(skb))) {
                /* vlan_tci is already set-up so leave this for another time */
                return skb;
        }

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                goto err_free;

        if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
                goto err_free;

        vhdr = (struct vlan_hdr *) skb->data;
        vlan_tci = ntohs(vhdr->h_vlan_TCI);
        __vlan_hwaccel_put_tag(skb, vlan_tci);

        skb_pull_rcsum(skb, VLAN_HLEN);
        vlan_set_encap_proto(skb, vhdr);

        skb = vlan_check_reorder_header(skb);
        if (unlikely(!skb))
                goto err_free;

        return skb;

err_free:
        kfree_skb(skb);
        return NULL;
}
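
For the non-accelerated path the core is expected to call the two entry points above in sequence, so that an in-band tag ends up in skb->vlan_tci exactly as a hardware-stripped one would. The fragment below is a simplified illustration of that calling order under the assumption of the headers vlan_core.c already pulls in; example_rx is a made-up name, and the real logic lives in __netif_receive_skb().

/* Simplified sketch of the expected calling order (illustration only). */
static int example_rx(struct sk_buff *skb)
{
        if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
                /* Software path: move the in-band tag into skb->vlan_tci,
                 * mirroring what accelerating hardware would have done. */
                skb = vlan_untag(skb);
                if (unlikely(!skb))
                        return NET_RX_DROP;
        }

        if (vlan_tx_tag_present(skb)) {
                /* Both paths converge here: retarget the skb at the
                 * matching VLAN device, if one is configured. */
                if (vlan_do_receive(&skb))
                        /* ... deliver again, now on the VLAN device ... */
                        return NET_RX_SUCCESS;
                if (unlikely(!skb))
                        return NET_RX_DROP;
        }

        /* ... normal delivery on the underlying device ... */
        return NET_RX_SUCCESS;
}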