net/8021q/vlan_core.c
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (skb_bond_should_drop(skb))
		goto drop;

	skb->vlan_tci = vlan_tci;
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
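
/*
 * Example (illustrative sketch, not part of this file): a driver whose NIC
 * strips the VLAN tag in hardware hands the frame up through the inline
 * wrappers in <linux/if_vlan.h>, which call __vlan_hwaccel_rx() with the
 * appropriate polling value.  The names foo_poll, adapter->vlgrp and
 * rx_desc->vlan_tag below are hypothetical driver-side names:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		...
 *		u16 vlan_tci = le16_to_cpu(rx_desc->vlan_tag);
 *
 *		// NAPI context: polling == 1, ends in netif_receive_skb()
 *		vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan_tci);
 *		...
 *	}
 *
 * Outside NAPI, the vlan_hwaccel_rx() wrapper passes polling == 0 and the
 * frame is queued via netif_rx() instead.
 */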

/*
 * Complete delivery of a hardware-accelerated VLAN frame: let packet taps
 * on the real device see it, then account it to the VLAN device.  Called
 * from netif_receive_skb() for frames whose tag was already stripped by
 * the driver (skb->vlan_tci set).
 */
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device_stats *stats;

	/* Deliver to taps (e.g. packet sockets) on the underlying device. */
	skb->dev = vlan_dev_info(dev)->real_dev;
	netif_nit_deliver(skb);

	skb->dev = dev;
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	stats = &dev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		stats->multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	return 0;
}

/* Accessors for code (e.g. drivers) that may be handed a VLAN device. */
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
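
/*
 * Example (illustrative sketch): a caller that may receive either a real
 * device or a VLAN device can combine these accessors with is_vlan_dev()
 * from <linux/if_vlan.h> to reach the underlying device:
 *
 *	if (is_vlan_dev(dev)) {
 *		struct net_device *real_dev = vlan_dev_real_dev(dev);
 *		u16 vlan_id = vlan_dev_vlan_id(dev);
 *		...
 *	}
 */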

static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
			   unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (skb_bond_should_drop(skb))
		goto drop;

	skb->vlan_tci = vlan_tci;
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	/* Mark held skbs as same-flow candidates only if they arrived on
	 * the same VLAN device and their Ethernet headers match. */
	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return 2;	/* tells the *_finish() helpers to drop the skb */
}

int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
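
/*
 * Example (illustrative sketch): a GRO-capable driver with hardware VLAN
 * stripping calls this instead of napi_gro_receive() from its poll
 * routine.  The names bar_poll, adapter->vlgrp, rx_desc and RXD_VLAN are
 * hypothetical driver-side names:
 *
 *	static int bar_poll(struct napi_struct *napi, int budget)
 *	{
 *		...
 *		if (adapter->vlgrp && (rx_desc->status & RXD_VLAN))
 *			vlan_gro_receive(napi, adapter->vlgrp,
 *					 le16_to_cpu(rx_desc->vlan_tag), skb);
 *		else
 *			napi_gro_receive(napi, skb);
 *		...
 *	}
 */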

int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
{
	struct sk_buff *skb = napi_fraginfo_skb(napi, info);

	if (!skb)
		return NET_RX_DROP;

	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);
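
/*
 * Example (illustrative sketch): a driver that receives directly into page
 * fragments fills in a struct napi_gro_fraginfo (frags[], nr_frags, len,
 * ip_summed, as declared in <linux/netdevice.h> at this point in the tree)
 * and lets vlan_gro_frags() build the skb.  The driver-side names (page,
 * offset, frame_len, adapter->vlgrp, vlan_tci) are hypothetical:
 *
 *	struct napi_gro_fraginfo info;
 *
 *	info.frags[0].page = page;
 *	info.frags[0].page_offset = offset;
 *	info.frags[0].size = frame_len;
 *	info.nr_frags = 1;
 *	info.len = frame_len;
 *	info.ip_summed = CHECKSUM_UNNECESSARY;
 *
 *	vlan_gro_frags(napi, adapter->vlgrp, vlan_tci, &info);
 */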