/* net/core/flow_dissector.c */
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to:  flow->src = iph->saddr;
 *                 flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
                     offsetof(typeof(*flow), src) + sizeof(flow->src));
        memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
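
/* On a 64-bit architecture the 8-byte memcpy above can be emitted as a
 * single load/store pair: saddr and daddr are adjacent in struct iphdr,
 * and the BUILD_BUG_ON guarantees that src and dst are adjacent in
 * struct flow_keys.
 */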

bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
        int poff, nhoff = skb_network_offset(skb);
        u8 ip_proto;
        __be16 proto = skb->protocol;

        memset(flow, 0, sizeof(*flow));

again:
        switch (proto) {
        case __constant_htons(ETH_P_IP): {
                const struct iphdr *iph;
                struct iphdr _iph;
ip:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
                        return false;

                if (ip_is_fragment(iph))
                        ip_proto = 0;
                else
                        ip_proto = iph->protocol;
                iph_to_flow_copy_addrs(flow, iph);
                nhoff += iph->ihl * 4;
                break;
        }
        case __constant_htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
ipv6:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
                        return false;

                ip_proto = iph->nexthdr;
                flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
                flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
                nhoff += sizeof(struct ipv6hdr);
                break;
        }
        case __constant_htons(ETH_P_8021Q): {
                const struct vlan_hdr *vlan;
                struct vlan_hdr _vlan;

                vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
                if (!vlan)
                        return false;

                proto = vlan->h_vlan_encapsulated_proto;
                nhoff += sizeof(*vlan);
                goto again;
        }
        case __constant_htons(ETH_P_PPP_SES): {
                struct {
                        struct pppoe_hdr hdr;
                        __be16 proto;
                } *hdr, _hdr;

                hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
                if (!hdr)
                        return false;
                proto = hdr->proto;
                nhoff += PPPOE_SES_HLEN;
                switch (proto) {
                case __constant_htons(PPP_IP):
                        goto ip;
                case __constant_htons(PPP_IPV6):
                        goto ipv6;
                default:
                        return false;
                }
        }
        default:
                return false;
        }

        switch (ip_proto) {
        case IPPROTO_GRE: {
                struct gre_hdr {
                        __be16 flags;
                        __be16 proto;
                } *hdr, _hdr;

                hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
                if (!hdr)
                        return false;
                /*
                 * Only look inside GRE if version zero and no
                 * routing
                 */
                if (!(hdr->flags & (GRE_VERSION | GRE_ROUTING))) {
                        proto = hdr->proto;
                        nhoff += 4;
                        if (hdr->flags & GRE_CSUM)
                                nhoff += 4;
                        if (hdr->flags & GRE_KEY)
                                nhoff += 4;
                        if (hdr->flags & GRE_SEQ)
                                nhoff += 4;
                        if (proto == htons(ETH_P_TEB)) {
                                const struct ethhdr *eth;
                                struct ethhdr _eth;

                                eth = skb_header_pointer(skb, nhoff,
                                                         sizeof(_eth), &_eth);
                                if (!eth)
                                        return false;
                                proto = eth->h_proto;
                                nhoff += sizeof(*eth);
                        }
                        goto again;
                }
                break;
        }
        case IPPROTO_IPIP:
                /* IPv4 encapsulated (protocol 4): dissect the inner header */
                proto = htons(ETH_P_IP);
                goto ip;
        default:
                break;
        }

        flow->ip_proto = ip_proto;
        poff = proto_ports_offset(ip_proto);
        if (poff >= 0) {
                __be32 *ports, _ports;

                nhoff += poff;
                ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
                if (ports)
                        flow->ports = *ports;
        }

        flow->thoff = (u16) nhoff;

        return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
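
/*
 * Usage sketch (illustrative only; example_matches_http() is a
 * hypothetical helper, not part of the kernel): dissect the skb once,
 * then match on the extracted fields.
 */
static inline bool example_matches_http(const struct sk_buff *skb)
{
        struct flow_keys keys;

        if (!skb_flow_dissect(skb, &keys))
                return false;   /* unsupported or truncated packet */

        /* port16[0] is the source port and port16[1] the destination
         * port, both in network byte order; thoff is the transport
         * header offset computed above.
         */
        return keys.ip_proto == IPPROTO_TCP &&
               keys.port16[1] == htons(80);
}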

static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
        struct flow_keys keys;
        u32 hash;

        if (!skb_flow_dissect(skb, &keys))
                return;

        if (keys.ports)
                skb->l4_rxhash = 1;

        /* get a consistent hash (same value on both flow directions) */
        if (((__force u32)keys.dst < (__force u32)keys.src) ||
            (((__force u32)keys.dst == (__force u32)keys.src) &&
             ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
                swap(keys.dst, keys.src);
                swap(keys.port16[0], keys.port16[1]);
        }

        hash = jhash_3words((__force u32)keys.dst,
                            (__force u32)keys.src,
                            (__force u32)keys.ports, hashrnd);
        if (!hash)
                hash = 1;

        skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
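
/*
 * Worked example of the canonicalization in __skb_get_rxhash(): for a
 * TCP flow 10.0.0.1:1024 <-> 10.0.0.2:80, a packet in either direction
 * ends up with the larger address (10.0.0.2) in keys.dst and the
 * smaller one in keys.src, with ports swapped to match, so
 * jhash_3words() sees identical inputs and both directions receive the
 * same rxhash.
 */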

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                  unsigned int num_tx_queues)
{
        u32 hash;
        u16 qoffset = 0;
        u16 qcount = num_tx_queues;

        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
                while (unlikely(hash >= num_tx_queues))
                        hash -= num_tx_queues;
                return hash;
        }

        if (dev->num_tc) {
                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

                qoffset = dev->tc_to_txq[tc].offset;
                qcount = dev->tc_to_txq[tc].count;
        }

        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = (__force u16) skb->protocol;
        hash = jhash_1word(hash, hashrnd);

        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
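
/*
 * The return statement above maps the 32-bit hash onto the range
 * [qoffset, qoffset + qcount) without a modulo: ((u64)hash * qcount) >> 32
 * scales hash / 2^32 into [0, qcount). For example, with qcount = 8 and
 * hash = 0x80000000, the result is (0x80000000ULL * 8) >> 32 = 4.
 */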

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
                net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
                                     dev->name, queue_index,
                                     dev->real_num_tx_queues);
                return 0;
        }
        return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
        struct xps_dev_maps *dev_maps;
        struct xps_map *map;
        int queue_index = -1;

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                map = rcu_dereference(
                    dev_maps->cpu_map[raw_smp_processor_id()]);
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
                        else {
                                u32 hash;

                                if (skb->sk && skb->sk->sk_hash)
                                        hash = skb->sk->sk_hash;
                                else
                                        hash = (__force u16) skb->protocol ^
                                            skb->rxhash;
                                hash = jhash_1word(hash, hashrnd);
                                queue_index = map->queues[
                                    ((u64)hash * map->len) >> 32];
                        }
                        if (unlikely(queue_index >= dev->real_num_tx_queues))
                                queue_index = -1;
                }
        }
        rcu_read_unlock();

        return queue_index;
#else
        return -1;
#endif
}

u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        int queue_index = sk_tx_queue_get(sk);

        if (queue_index < 0 || skb->ooo_okay ||
            queue_index >= dev->real_num_tx_queues) {
                int new_index = get_xps_queue(dev, skb);

                if (new_index < 0)
                        new_index = skb_tx_hash(dev, skb);

                if (queue_index != new_index && sk) {
                        struct dst_entry *dst =
                                rcu_dereference_check(sk->sk_dst_cache, 1);

                        /* cache the newly chosen queue on the socket,
                         * not the stale index we are about to replace
                         */
                        if (dst && skb_dst(skb) == dst)
                                sk_tx_queue_set(sk, new_index);
                }

                queue_index = new_index;
        }

        return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
                                    struct sk_buff *skb)
{
        int queue_index = 0;

        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;

                if (ops->ndo_select_queue)
                        queue_index = ops->ndo_select_queue(dev, skb);
                else
                        queue_index = __netdev_pick_tx(dev, skb);
                queue_index = dev_cap_txqueue(dev, queue_index);
        }

        skb_set_queue_mapping(skb, queue_index);
        return netdev_get_tx_queue(dev, queue_index);
}
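
/*
 * Selection order implemented above, summarized: a driver-supplied
 * ndo_select_queue() callback wins if present; otherwise
 * __netdev_pick_tx() tries the queue cached on the socket, then XPS,
 * then skb_tx_hash(). Either way the result is clamped to the device's
 * real queue count by dev_cap_txqueue().
 */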

static int __init initialize_hashrnd(void)
{
        get_random_bytes(&hashrnd, sizeof(hashrnd));
        return 0;
}

late_initcall_sync(initialize_hashrnd);