#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
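
/* The BUILD_BUG_ON() above is what makes the single memcpy() safe: it
 * breaks the build if dst ever stops being laid out immediately after
 * src inside struct flow_keys.  On 64bit arches the compiler can then
 * turn the 8-byte copy into one load/store pair.
 */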

bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int poff, nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
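
	/* Note on the IPv4 case above: ip_is_fragment() is true for every
	 * fragment, the first one included, so fragmented flows are hashed
	 * on addresses only (ip_proto = 0) and all fragments of a datagram
	 * end up with the same hash.
	 */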
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
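
	/* flow_keys only has room for 32bit addresses, so the IPv6 case
	 * above folds each 128bit address down with ipv6_addr_hash(), an
	 * xor of its four 32bit words, before storing it.
	 */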
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
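
	/* The goto again above re-runs the switch on the encapsulated
	 * ethertype, so stacked 802.1Q tags are peeled off one per pass.
	 */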
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}
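
	/* A PPPoE session frame carries a 2-byte PPP protocol field right
	 * after the session header, hence the jump straight into the
	 * matching network-layer parser above.  The switch below peels L3
	 * tunnels (GRE, IPIP) the same way the one above peeled L2 tags.
	 */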
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION | GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		goto again;
	default:
		break;
	}
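
	/* The 4-byte steps in the GRE case above follow RFC 2784/2890
	 * framing: the base header is 4 bytes and each optional field
	 * flagged present (checksum, key, sequence number) adds 4 more.
	 * For ETH_P_TEB the payload is a full Ethernet frame, so the inner
	 * Ethernet header is skipped too before looping back.
	 */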
	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		__be32 *ports, _ports;

		nhoff += poff;
		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
		if (ports)
			flow->ports = *ports;
	}

	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
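
/* Source and destination ports are adjacent in TCP, UDP, SCTP and DCCP
 * headers, so proto_ports_offset() plus a single 32bit load fills both
 * 16bit halves of flow->ports at once.
 *
 * Minimal caller sketch (hypothetical, not part of this file):
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("proto %u ports %#x\n",
 *			 keys.ip_proto, ntohl(keys.ports));
 */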

static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}
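
	/* e.g. 10.0.0.1:1234 -> 10.0.0.2:80 and the reply direction now
	 * yield the same (dst, src, ports) triple, hence the same rxhash,
	 * letting RPS/RFS steer both directions of a flow to one CPU.
	 */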

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to use as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
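
/* ((u64) hash * qcount) >> 32 maps a uniform 32bit hash onto [0, qcount)
 * without the cost of a modulo; qoffset then shifts the result into the
 * queue block that belongs to the skb's traffic class.
 */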

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
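
/* XPS: each CPU may be assigned a set of egress queues (dev->xps_maps,
 * RCU-protected).  A one-entry map is used directly; larger maps are
 * indexed with the same multiply-shift trick as __skb_tx_hash().  A
 * return of -1 tells the caller to fall back to hash-based selection.
 */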

u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk) {
			struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

			/* cache the newly selected queue, not the stale one */
			if (dst && skb_dst(skb) == dst)
				sk_tx_queue_set(sk, new_index);
		}

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
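
/* The queue choice is cached in the socket only while the socket's
 * cached dst matches the skb's, so a route change naturally invalidates
 * the cached queue along with the route.
 */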

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
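
/* Drivers with their own queue policy take over via ndo_select_queue;
 * either way the result is capped by dev_cap_txqueue() before the
 * mapping is stored in the skb.
 */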

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
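
/* Seeding hashrnd from a late initcall gives the entropy pool most of
 * boot to fill; until it runs, hashrnd is simply zero, which still
 * hashes consistently, just without per-boot randomization.
 */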