/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;
static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
		struct sw_flow_key_range *range, u8 val);
static void update_range__(struct sw_flow_match *match,
			   size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range = NULL;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else if (match->mask)
		range = &match->mask->range;

	if (range == NULL)
		return;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range__(match, offsetof(struct sw_flow_key, field),  \
				     sizeof((match)->key->field), is_mask); \
		if (is_mask) {						    \
			if ((match)->mask)				    \
				(match)->mask->key.field = value;	    \
		} else {						    \
			(match)->key->field = value;			    \
		}							    \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
	do { \
		update_range__(match, offsetof(struct sw_flow_key, field),  \
				len, is_mask);				    \
		if (is_mask) {						    \
			if ((match)->mask)				    \
				memcpy(&(match)->mask->key.field, value_p, len);\
		} else {						    \
			memcpy(&(match)->key->field, value_p, len);	    \
		}							    \
	} while (0)
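/* For illustration: SW_FLOW_KEY_PUT(match, ip.proto, 0xff, true) widens the
 * mask's key range to cover ip.proto and writes 0xff into the mask key, i.e.
 * an exact match on the IP protocol; with is_mask == false the value is
 * written into the flow key itself instead.
 */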
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
void ovs_match_init(struct sw_flow_match *match,
		    struct sw_flow_key *key,
		    struct sw_flow_mask *mask)
{
	memset(match, 0, sizeof(*match));
	match->key = key;
	match->mask = mask;

	memset(key, 0, sizeof(*key));

	if (mask) {
		memset(&mask->key, 0, sizeof(mask->key));
		mask->range.start = mask->range.end = 0;
	}
}
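/* Validates that parsed key and mask attributes are mutually consistent:
 * every attribute implied by the key's EtherType/IP protocol must be
 * present, and a mask attribute is only allowed where the corresponding
 * selector in the key is matched exactly (e.g. an exact EtherType for L3
 * masks, an exact IP protocol for L4 masks).
 */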
static bool ovs_match_validate(const struct sw_flow_match *match,
			       u64 key_attrs, u64 mask_attrs)
{
	u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes allowed only if they
	 * pass the validation tests. */
	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
			| (1 << OVS_KEY_ATTR_IPV6)
			| (1 << OVS_KEY_ATTR_TCP)
			| (1 << OVS_KEY_ATTR_UDP)
			| (1 << OVS_KEY_ATTR_SCTP)
			| (1 << OVS_KEY_ATTR_ICMP)
			| (1 << OVS_KEY_ATTR_ICMPV6)
			| (1 << OVS_KEY_ATTR_ARP)
			| (1 << OVS_KEY_ATTR_ND));

	/* Always allowed mask fields. */
	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
		       | (1 << OVS_KEY_ATTR_IN_PORT)
		       | (1 << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1 << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;

				if (match->key->ipv6.tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1 << OVS_KEY_ATTR_ND;
					if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
				key_attrs, key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
				mask_attrs, mask_allowed);
		return false;
	}

	return true;
}
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
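/* Note: on success parse_ipv6hdr() returns nh_len (the IPv6 header plus any
 * extension headers), which ovs_flow_extract() later passes to parse_icmpv6();
 * a negative errno indicates a malformed header.
 */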
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
	const long *s = (long *)((u8 *)src + mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of the 'mask->range' are not set since
	 * further operations on 'dst' only uses contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
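/* Masking walks the key one machine word at a time; update_range__() rounds
 * every range down/up to sizeof(long) boundaries, so the loop never reads or
 * writes outside the masked region.
 */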
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}
struct sw_flow_actions *ovs_flow_actions_alloc(int size)
{
	struct sw_flow_actions *sfa;

	if (size > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	return sfa;
}
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;
	flow->mask = NULL;

	return flow;
}
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
				(hash & (table->n_buckets - 1)));
}
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
static struct flow_table *__flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));
	table->mask_list = NULL;

	return table;
}
static void __flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del(&flow->hash_node[ver]);
			ovs_flow_free(flow, false);
		}
	}

	BUG_ON(!list_empty(table->mask_list));
	kfree(table->mask_list);

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = __flow_tbl_alloc(new_size);

	if (!table)
		return NULL;

	table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!table->mask_list) {
		table->keep_flows = true;
		__flow_tbl_destroy(table);
		return NULL;
	}
	INIT_LIST_HEAD(table->mask_list);

	return table;
}
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	__flow_tbl_destroy(table);
}
void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	if (!table)
		return;

	if (deferred)
		call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
	else
		__flow_tbl_destroy(table);
}
struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);

	table->count++;
}
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			__tbl_insert(new, flow);
	}

	new->mask_list = old->mask_list;
	old->keep_flows = true;
}
static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;

	new_table = __flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

	return new_table;
}
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}
static void __flow_free(struct sw_flow *flow)
{
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	__flow_free(flow);
}
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	ovs_sw_flow_mask_del_ref(flow->mask, deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		__flow_free(flow);
}
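/* With 'deferred' set, both the mask reference drop and the flow itself are
 * freed only after an RCU grace period, so concurrent RCU readers that still
 * hold a pointer to the flow remain safe.
 */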
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	kfree_rcu(sf_acts, rcu);
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= ETH_P_802_3_MIN)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				    &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	if (OVS_CB(skb)->tun_key)
		memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
	key->phy.in_port = in_port;
	key->phy.skb_mark = skb->mark;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
			 skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->ipv4.tp.src = sctp->source;
				key->ipv4.tp.dst = sctp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}

	} else if ((key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
			memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->ipv6.tp.src = sctp->source;
				key->ipv6.tp.dst = sctp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			}
		}
	}

	return 0;
}
static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start,
			 int key_end)
{
	u32 *hash_key = (u32 *)((u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure number of hash bytes are multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
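/* Only the bytes inside [key_start, key_end) feed the hash, so two keys that
 * agree on the masked region hash to the same bucket regardless of the bytes
 * the mask wildcards away.
 */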
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
					  sizeof(long));
}
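/* When no tunnel is in use (tun_key.ipv4_dst == 0), hashing and comparison
 * can skip the all-zero tunnel key at the front of struct sw_flow_key and
 * start at the 'phy' metadata instead.
 */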
static bool __cmp_key(const struct sw_flow_key *key1,
		const struct sw_flow_key *key2, int key_start, int key_end)
{
	const long *cp1 = (long *)((u8 *)key1 + key_start);
	const long *cp2 = (long *)((u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
static bool __flow_cmp_masked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_end)
{
	return __cmp_key(&flow->key, key, key_start, key_end);
}

static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_end)
{
	return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
}
bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_end)
{
	int key_start;

	key_start = flow_key_start(key);

	return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
}
struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
				       struct sw_flow_match *match)
{
	struct sw_flow_key *unmasked = match->key;
	int key_end = match->range.end;
	struct sw_flow *flow;

	flow = ovs_flow_lookup(table, unmasked);
	if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
		flow = NULL;

	return flow;
}
static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
				    const struct sw_flow_key *unmasked,
				    struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_key_mask(&masked_key, unmasked, mask);
	hash = ovs_flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
		if (flow->mask == mask &&
		    __flow_cmp_masked_key(flow, &masked_key,
					  key_start, key_end))
			return flow;
	}
	return NULL;
}
struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
				const struct sw_flow_key *key)
{
	struct sw_flow *flow = NULL;
	struct sw_flow_mask *mask;

	list_for_each_entry_rcu(mask, tbl->mask_list, list) {
		flow = ovs_masked_flow_lookup(tbl, key, mask);
		if (flow)  /* Found */
			break;
	}

	return flow;
}
*table
, struct sw_flow
*flow
)
1118 flow
->hash
= ovs_flow_hash(&flow
->key
, flow
->mask
->range
.start
,
1119 flow
->mask
->range
.end
);
1120 __tbl_insert(table
, flow
);
void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
}
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
	[OVS_KEY_ATTR_TUNNEL] = -1,
};
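/* A length of -1 marks a nested attribute (OVS_KEY_ATTR_ENCAP,
 * OVS_KEY_ATTR_TUNNEL) whose contents are validated separately.
 */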
static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}
static int __parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[],
			      u64 *attrsp, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1 << type)) {
			OVS_NLERR("Duplicate key attribute (type %d).\n", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1) {
			OVS_NLERR("Key attribute has unexpected length (type=%d"
				  ", length=%d, expected=%d).\n", type,
				  nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1 << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR("Message has %d unknown bytes.\n", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, true);
}

static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, false);
}
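/* Mask attributes are parsed with 'nz' set: an all-zero mask attribute is
 * dropped, leaving the corresponding field fully wildcarded, while key
 * attributes are always kept.
 */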
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
			     struct sw_flow_match *match, bool is_mask)
{
	struct nlattr *a;
	int rem;
	bool ttl = false;
	__be16 tun_flags = 0;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
		};

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (ovs_tunnel_key_lens[type] != nla_len(a)) {
			OVS_NLERR("IPv4 tunnel attribute type has unexpected "
				  " length (type=%d, length=%d, expected=%d).\n",
				  type, nla_len(a), ovs_tunnel_key_lens[type]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		default:
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);

	if (rem > 0) {
		OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
		return -EINVAL;
	}

	if (!is_mask) {
		if (!match->key->tun_key.ipv4_dst) {
			OVS_NLERR("IPv4 tunnel destination address is zero.\n");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR("IPv4 tunnel TTL not specified.\n");
			return -EINVAL;
		}
	}

	return 0;
}
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
			   const struct ovs_key_ipv4_tunnel *tun_key,
			   const struct ovs_key_ipv4_tunnel *output)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (output->ipv4_src &&
		nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
		return -EMSGSIZE;
	if (output->ipv4_dst &&
		nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
		return -EMSGSIZE;
	if (output->ipv4_tos &&
		nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
		nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
		const struct nlattr **a, bool is_mask)
{
	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask)
			in_port = 0xffffffff; /* Always exact match in_port. */
		else if (in_port >= DP_MAX_PORTS)
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					is_mask))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}
static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
		const struct nlattr **a, bool is_mask)
{
	int err;
	u64 orig_attrs = attrs;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
			else
				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	} else if (!is_mask)
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
					ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
				ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
				ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				ipv6_key->ipv6_src,
				sizeof(match->key->ipv6.addr.src),
				is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				ipv6_key->ipv6_dst,
				sizeof(match->key->ipv6.addr.dst),
				is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					tcp_key->tcp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					tcp_key->tcp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					udp_key->udp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					udp_key->udp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					sctp_key->sctp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					sctp_key->sctp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
			nd_key->nd_target,
			sizeof(match->key->ipv6.nd.target),
			is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
			nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0)
		return -EINVAL;

	return 0;
}
/**
 * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as exact match
 * flow. Otherwise, it is treated as a wildcarded flow whose don't-care
 * bits are given by the mask.
 * @match: receives the extracted flow match information.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be taken from the packet that triggered the
 * creation of this flow.
 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
 * attribute specifies the mask field of the wildcarded flow.
 */
int ovs_match_from_nlattrs(struct sw_flow_match *match,
			   const struct nlattr *key,
			   const struct nlattr *mask)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct nlattr *encap;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	bool encap_valid = false;
	int err;

	err = parse_flow_nlattrs(key, a, &key_attrs);
	if (err)
		return err;

	if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	    (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
		__be16 tci;

		if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
		      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
			OVS_NLERR("Invalid Vlan frame.\n");
			return -EINVAL;
		}

		key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		encap = a[OVS_KEY_ATTR_ENCAP];
		key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
		encap_valid = true;

		if (tci & htons(VLAN_TAG_PRESENT)) {
			err = parse_flow_nlattrs(encap, a, &key_attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap)) {
				OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
				return -EINVAL;
			}
		} else {
			OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
			return -EINVAL;
		}
	}

	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
	if (err)
		return err;

	if (mask) {
		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
		if (err)
			return err;

		if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) {
			__be16 eth_type = 0;
			__be16 tci = 0;

			if (!encap_valid) {
				OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
				return -EINVAL;
			}

			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

			if (eth_type == htons(0xffff)) {
				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
				encap = a[OVS_KEY_ATTR_ENCAP];
				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
			} else {
				OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
						ntohs(eth_type));
				return -EINVAL;
			}

			if (a[OVS_KEY_ATTR_VLAN])
				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

			if (!(tci & htons(VLAN_TAG_PRESENT))) {
				OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
				return -EINVAL;
			}
		}

		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
		if (err)
			return err;
	} else {
		/* Populate exact match flow's key mask. */
		if (match->mask)
			ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
	}

	if (!ovs_match_validate(match, key_attrs, mask_attrs))
		return -EINVAL;

	return 0;
}
/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
				   const struct nlattr *attr)
{
	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;
	struct sw_flow_match match;

	flow->key.phy.in_port = DP_MAX_PORTS;
	flow->key.phy.priority = 0;
	flow->key.phy.skb_mark = 0;
	memset(tun_key, 0, sizeof(flow->key.tun_key));

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return -EINVAL;

	memset(&match, 0, sizeof(match));
	match.key = &flow->key;

	err = metadata_from_nlattrs(&match, &attrs, a, false);
	if (err)
		return err;

	return 0;
}
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
		const struct sw_flow_key *output, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;
	bool is_mask = (swkey != output);

	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
		goto nla_put_failure;

	if ((swkey->tun_key.ipv4_dst || is_mask) &&
	    ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
		goto nla_put_failure;

	if (swkey->phy.in_port == DP_MAX_PORTS) {
		if (is_mask && (output->phy.in_port == 0xffff))
			if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
				goto nla_put_failure;
	} else {
		u16 upper_u16;
		upper_u16 = !is_mask ? 0 : 0xffff;

		if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
				(upper_u16 << 16) | output->phy.in_port))
			goto nla_put_failure;
	}

	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;

	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		__be16 eth_type;
		eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else
		encap = NULL;

	if (swkey->eth.type == htons(ETH_P_802_2)) {
		/*
		 * Ethertype 802.2 is represented in the netlink with omitted
		 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
		 * 0xffff in the mask attribute.  Ethertype can also
		 * be wildcarded.
		 */
		if (is_mask && output->eth.type)
			if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
						output->eth.type))
				goto nla_put_failure;
		goto unencap;
	}

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = output->ipv4.addr.src;
		ipv4_key->ipv4_dst = output->ipv4.addr.dst;
		ipv4_key->ipv4_proto = output->ip.proto;
		ipv4_key->ipv4_tos = output->ip.tos;
		ipv4_key->ipv4_ttl = output->ip.ttl;
		ipv4_key->ipv4_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
				sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
				sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = output->ipv6.label;
		ipv6_key->ipv6_proto = output->ip.proto;
		ipv6_key->ipv6_tclass = output->ip.tos;
		ipv6_key->ipv6_hlimit = output->ip.ttl;
		ipv6_key->ipv6_frag = output->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP) ||
		   swkey->eth.type == htons(ETH_P_RARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = output->ipv4.addr.src;
		arp_key->arp_tip = output->ipv4.addr.dst;
		arp_key->arp_op = htons(output->ip.proto);
		memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = output->ipv4.tp.src;
				tcp_key->tcp_dst = output->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = output->ipv6.tp.src;
				tcp_key->tcp_dst = output->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = output->ipv4.tp.src;
				udp_key->udp_dst = output->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = output->ipv6.tp.src;
				udp_key->udp_dst = output->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_SCTP) {
			struct ovs_key_sctp *sctp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
			if (!nla)
				goto nla_put_failure;
			sctp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				sctp_key->sctp_src = swkey->ipv4.tp.src;
				sctp_key->sctp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				sctp_key->sctp_src = swkey->ipv6.tp.src;
				sctp_key->sctp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
						sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
							sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
					0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}
struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 0;

	return mask;
}
void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
	mask->ref_count++;
}

void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	mask->ref_count--;

	if (!mask->ref_count) {
		list_del_rcu(&mask->list);
		if (deferred)
			kfree_rcu(mask, rcu);
		else
			kfree(mask);
	}
}
static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
		const struct sw_flow_mask *b)
{
	u8 *a_ = (u8 *)&a->key + a->range.start;
	u8 *b_ = (u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}
struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, tbl->mask_list) {
		struct sw_flow_mask *m;
		m = container_of(ml, struct sw_flow_mask, list);
		if (ovs_sw_flow_mask_equal(mask, m))
			return m;
	}

	return NULL;
}
/**
 * add a new mask into the mask list.
 * The caller needs to make sure that 'mask' is not the same
 * as any masks that are already on the list.
 */
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	list_add_rcu(&mask->list, tbl->mask_list);
}
/**
 * Set 'range' fields in the mask to the value of 'val'.
 */
static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
		struct sw_flow_key_range *range, u8 val)
{
	u8 *m = (u8 *)&mask->key + range->start;

	mask->range = *range;
	memset(m, val, range_n_bytes(range));
}