| 1 | #include <linux/skbuff.h> |
| 2 | #include <linux/netdevice.h> |
| 3 | #include <linux/if_vlan.h> |
| 4 | #include <linux/netpoll.h> |
| 5 | #include "vlan.h" |
| 6 | |
| 7 | /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ |
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	struct net_device *vlan_dev;
	u16 vlan_id;

	/* Give netpoll first claim on the frame; if it consumes the skb
	 * we are done (netpoll owns it from here). */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	/* Frame arrived on an inactive bonding slave: flag it so it is
	 * not delivered to wildcard protocol taps (presumably to avoid
	 * duplicate delivery across slaves -- see skb_bond_should_drop). */
	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	/* Record the ingress ifindex and stash the hw-stripped tag back
	 * into the skb before looking up the vlan device for this VID. */
	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	/* Steer into the vlan device when one is configured for this VID.
	 * A tagged frame for an unconfigured VID is kept only on
	 * promiscuous devices, marked OTHERHOST; otherwise it is dropped.
	 * VID 0 (priority-tagged) falls through on the original device. */
	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id) {
		if (!(skb->dev->flags & IFF_PROMISC))
			goto drop;
		skb->pkt_type = PACKET_OTHERHOST;
	}

	/* Mirror netif_receive_skb()/netif_rx() semantics per 'polling'. */
	return polling ? netif_receive_skb(skb) : netif_rx(skb);

drop:
	/* Unlike the GRO path, this path owns the skb and must free it. */
	atomic_long_inc(&skb->dev->rx_dropped);
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
| 41 | |
/* Finish hw-accelerated VLAN reception for a frame already steered onto
 * a vlan device: deliver to taps on the real device, translate the 802.1p
 * priority bits, consume the tag, and update per-cpu RX statistics. */
void vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats *rx_stats;

	/* Temporarily point skb->dev at the underlying device so packet
	 * taps (NIT) see the frame on the real interface, then restore. */
	skb->dev = vlan_dev_real_dev(dev);
	netif_nit_deliver(skb);

	skb->dev = dev;
	/* Map the tag's priority bits to skb->priority, then clear the
	 * tag: it is fully consumed at this point. */
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);

	/* Bracket all counter updates in the u64 stats seqcount so 32-bit
	 * readers see a consistent snapshot. */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);
}
| 77 | |
| 78 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
| 79 | { |
| 80 | return vlan_dev_info(dev)->real_dev; |
| 81 | } |
| 82 | EXPORT_SYMBOL(vlan_dev_real_dev); |
| 83 | |
| 84 | u16 vlan_dev_vlan_id(const struct net_device *dev) |
| 85 | { |
| 86 | return vlan_dev_info(dev)->vlan_id; |
| 87 | } |
| 88 | EXPORT_SYMBOL(vlan_dev_vlan_id); |
| 89 | |
/* Common GRO-path twin of __vlan_hwaccel_rx(): tag the skb, steer it onto
 * the vlan device for its VID, prime the GRO flow matching against held
 * packets, then hand off to dev_gro_receive(). */
static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct net_device *vlan_dev;
	u16 vlan_id;

	/* Same inactive-bond-slave suppression as __vlan_hwaccel_rx(). */
	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	/* Steering mirrors __vlan_hwaccel_rx(): configured VID -> vlan
	 * device; unconfigured VID -> keep only if promiscuous (marked
	 * OTHERHOST), else drop; VID 0 stays on the original device. */
	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id) {
		if (!(skb->dev->flags & IFF_PROMISC))
			goto drop;
		skb->pkt_type = PACKET_OTHERHOST;
	}

	/* Re-evaluate same_flow for every held GRO packet now that
	 * skb->dev may have changed: flows only merge when both the
	 * device and the ethernet header match. */
	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= compare_ether_header(skb_mac_header(p),
					      skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	/* Do NOT free the skb here: on GRO_DROP the callers' finish
	 * helpers (napi_skb_finish/napi_frags_finish) dispose of it. */
	atomic_long_inc(&skb->dev->rx_dropped);
	return GRO_DROP;
}
| 130 | |
| 131 | gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, |
| 132 | unsigned int vlan_tci, struct sk_buff *skb) |
| 133 | { |
| 134 | if (netpoll_rx_on(skb)) |
| 135 | return vlan_hwaccel_receive_skb(skb, grp, vlan_tci) |
| 136 | ? GRO_DROP : GRO_NORMAL; |
| 137 | |
| 138 | skb_gro_reset_offset(skb); |
| 139 | |
| 140 | return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb); |
| 141 | } |
| 142 | EXPORT_SYMBOL(vlan_gro_receive); |
| 143 | |
| 144 | gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, |
| 145 | unsigned int vlan_tci) |
| 146 | { |
| 147 | struct sk_buff *skb = napi_frags_skb(napi); |
| 148 | |
| 149 | if (!skb) |
| 150 | return GRO_DROP; |
| 151 | |
| 152 | if (netpoll_rx_on(skb)) { |
| 153 | skb->protocol = eth_type_trans(skb, skb->dev); |
| 154 | return vlan_hwaccel_receive_skb(skb, grp, vlan_tci) |
| 155 | ? GRO_DROP : GRO_NORMAL; |
| 156 | } |
| 157 | |
| 158 | return napi_frags_finish(napi, skb, |
| 159 | vlan_gro_common(napi, grp, vlan_tci, skb)); |
| 160 | } |
| 161 | EXPORT_SYMBOL(vlan_gro_frags); |