#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"
7 /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
8 int __vlan_hwaccel_rx(struct sk_buff
*skb
, struct vlan_group
*grp
,
9 u16 vlan_tci
, int polling
)
11 struct net_device
*vlan_dev
;
17 if (skb_bond_should_drop(skb
, ACCESS_ONCE(skb
->dev
->master
)))
18 skb
->deliver_no_wcard
= 1;
20 skb
->skb_iif
= skb
->dev
->ifindex
;
21 __vlan_hwaccel_put_tag(skb
, vlan_tci
);
22 vlan_id
= vlan_tci
& VLAN_VID_MASK
;
23 vlan_dev
= vlan_group_get_device(grp
, vlan_id
);
28 if (!(skb
->dev
->flags
& IFF_PROMISC
))
30 skb
->pkt_type
= PACKET_OTHERHOST
;
33 return polling
? netif_receive_skb(skb
) : netif_rx(skb
);
36 atomic_long_inc(&skb
->dev
->rx_dropped
);
37 dev_kfree_skb_any(skb
);
40 EXPORT_SYMBOL(__vlan_hwaccel_rx
);
42 void vlan_hwaccel_do_receive(struct sk_buff
*skb
)
44 struct net_device
*dev
= skb
->dev
;
45 struct vlan_rx_stats
*rx_stats
;
47 skb
->dev
= vlan_dev_real_dev(dev
);
48 netif_nit_deliver(skb
);
51 skb
->priority
= vlan_get_ingress_priority(dev
, skb
->vlan_tci
);
54 rx_stats
= this_cpu_ptr(vlan_dev_info(dev
)->vlan_rx_stats
);
56 u64_stats_update_begin(&rx_stats
->syncp
);
57 rx_stats
->rx_packets
++;
58 rx_stats
->rx_bytes
+= skb
->len
;
60 switch (skb
->pkt_type
) {
61 case PACKET_BROADCAST
:
63 case PACKET_MULTICAST
:
64 rx_stats
->rx_multicast
++;
66 case PACKET_OTHERHOST
:
67 /* Our lower layer thinks this is not local, let's make sure.
68 * This allows the VLAN to have a different MAC than the
69 * underlying device, and still route correctly. */
70 if (!compare_ether_addr(eth_hdr(skb
)->h_dest
,
72 skb
->pkt_type
= PACKET_HOST
;
75 u64_stats_update_end(&rx_stats
->syncp
);
78 struct net_device
*vlan_dev_real_dev(const struct net_device
*dev
)
80 return vlan_dev_info(dev
)->real_dev
;
82 EXPORT_SYMBOL(vlan_dev_real_dev
);
84 u16
vlan_dev_vlan_id(const struct net_device
*dev
)
86 return vlan_dev_info(dev
)->vlan_id
;
88 EXPORT_SYMBOL(vlan_dev_vlan_id
);
91 vlan_gro_common(struct napi_struct
*napi
, struct vlan_group
*grp
,
92 unsigned int vlan_tci
, struct sk_buff
*skb
)
95 struct net_device
*vlan_dev
;
98 if (skb_bond_should_drop(skb
, ACCESS_ONCE(skb
->dev
->master
)))
99 skb
->deliver_no_wcard
= 1;
101 skb
->skb_iif
= skb
->dev
->ifindex
;
102 __vlan_hwaccel_put_tag(skb
, vlan_tci
);
103 vlan_id
= vlan_tci
& VLAN_VID_MASK
;
104 vlan_dev
= vlan_group_get_device(grp
, vlan_id
);
109 if (!(skb
->dev
->flags
& IFF_PROMISC
))
111 skb
->pkt_type
= PACKET_OTHERHOST
;
114 for (p
= napi
->gro_list
; p
; p
= p
->next
) {
117 diffs
= (unsigned long)p
->dev
^ (unsigned long)skb
->dev
;
118 diffs
|= compare_ether_header(skb_mac_header(p
),
119 skb_gro_mac_header(skb
));
120 NAPI_GRO_CB(p
)->same_flow
= !diffs
;
121 NAPI_GRO_CB(p
)->flush
= 0;
124 return dev_gro_receive(napi
, skb
);
127 atomic_long_inc(&skb
->dev
->rx_dropped
);
131 gro_result_t
vlan_gro_receive(struct napi_struct
*napi
, struct vlan_group
*grp
,
132 unsigned int vlan_tci
, struct sk_buff
*skb
)
134 if (netpoll_rx_on(skb
))
135 return vlan_hwaccel_receive_skb(skb
, grp
, vlan_tci
)
136 ? GRO_DROP
: GRO_NORMAL
;
138 skb_gro_reset_offset(skb
);
140 return napi_skb_finish(vlan_gro_common(napi
, grp
, vlan_tci
, skb
), skb
);
142 EXPORT_SYMBOL(vlan_gro_receive
);
144 gro_result_t
vlan_gro_frags(struct napi_struct
*napi
, struct vlan_group
*grp
,
145 unsigned int vlan_tci
)
147 struct sk_buff
*skb
= napi_frags_skb(napi
);
152 if (netpoll_rx_on(skb
)) {
153 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
154 return vlan_hwaccel_receive_skb(skb
, grp
, vlan_tci
)
155 ? GRO_DROP
: GRO_NORMAL
;
158 return napi_frags_finish(napi
, skb
,
159 vlan_gro_common(napi
, grp
, vlan_tci
, skb
));
161 EXPORT_SYMBOL(vlan_gro_frags
);