/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
	const long *s = (long *)((u8 *)src + mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set, since further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}

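/* A minimal usage sketch (hypothetical caller; 'flow' and 'mask' are
 * assumed valid and already set up):
 *
 *	struct sw_flow_key masked;
 *
 *	ovs_flow_mask_key(&masked, &flow->key, mask);
 *
 * Within mask->range, 'masked' now holds flow->key with all don't-care
 * bits cleared; bytes outside the range are left uninitialized.
 */
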
struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;
	flow->mask = NULL;

	return flow;
}

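/* Note the ERR_PTR convention: on failure the allocator returns an
 * encoded errno rather than NULL, so a hypothetical caller would
 * check:
 *
 *	struct sw_flow *flow = ovs_flow_alloc();
 *
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 */
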
int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

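/* flex_array is used instead of a plain kmalloc'd array so the bucket
 * table can grow beyond what a single contiguous allocation allows;
 * the cost is that elements must always be reached through
 * flex_array_get(), e.g. (hypothetical index 'i'):
 *
 *	struct hlist_head *head = flex_array_get(ti->buckets, i);
 */
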
static void flow_free(struct sw_flow *flow)
{
	kfree((struct sf_flow_acts __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	ovs_sw_flow_mask_del_ref(flow->mask, deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

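/* 'deferred' selects between freeing immediately (safe only when no
 * other CPU can still hold an RCU-protected reference) and freeing
 * after a grace period via call_rcu(). A hypothetical removal path:
 *
 *	ovs_flow_tbl_remove(table, flow);
 *	ovs_flow_free(flow, true);	// readers may still see 'flow'
 */
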
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	int i;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del(&flow->hash_node[ver]);
			ovs_flow_free(flow, false);
		}
	}

skip_flows:
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

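/* Each table instance draws a fresh random hash seed, so bucket
 * placement is not predictable across instances; find_bucket() below
 * folds the seed in with jhash_1word() before indexing.
 */
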
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

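/* A minimal lifecycle sketch (hypothetical caller, error handling
 * elided; 'false' destroys synchronously, which is only safe once no
 * RCU readers can reach the table):
 *
 *	struct flow_table table;
 *
 *	if (ovs_flow_tbl_init(&table))
 *		return -ENOMEM;
 *	...
 *	ovs_flow_tbl_destroy(&table, false);
 */
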
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	if (!ti)
		return;

	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	table_instance_destroy(ti, deferred);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

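/* The (*bucket, *last) pair is a resumable cursor: *bucket indexes the
 * hash bucket and *last counts entries already returned within it, so
 * a netlink dump can pick up where the previous message stopped. A
 * hypothetical dump loop:
 *
 *	u32 bucket = 0, last = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_dump_next(ti, &bucket, &last)))
 *		emit(flow);	// 'emit' is a placeholder
 */
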
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

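/* 'hash & (n_buckets - 1)' only selects a valid bucket because
 * n_buckets is always a power of two: TBL_MIN_BUCKETS is 1024 and
 * table_instance_expand() grows the table by doubling.
 */
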
static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

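/* Each flow carries two hash_node links, indexed by node_ver. Copying
 * links every flow into the new instance under !old_ver while the old
 * instance keeps serving RCU readers under old_ver, so a rehash never
 * unlinks a node that a concurrent lookup might be traversing.
 * Setting old->keep_flows stops the old instance's destructor from
 * freeing flows that now also live in the new instance.
 */
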
static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	u32 *hash_key = (u32 *)((u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

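/* The hash covers only [key_start, key_end), i.e. the bytes a mask
 * declares relevant, so two keys that differ only in don't-care bits
 * hash identically once masked. See masked_flow_lookup() below for
 * the combination with ovs_flow_mask_key().
 */
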
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (long *)((u8 *)key1 + key_start);
	const long *cp2 = (long *)((u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

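/* The loop compares a long's worth of bytes at a time, accumulating
 * differences with XOR/OR instead of branching or exiting early per
 * word. This is valid because the BUILD_BUG_ONs in ovs_flow_init()
 * guarantee struct sw_flow_key is long-aligned and a whole multiple
 * of sizeof(long).
 */
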
static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)	/* Found */
			return flow;
	}
	return NULL;
}

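/* This is the classic megaflow lookup: one hash-table probe per mask
 * on the list, first hit wins, so lookup cost scales with the number
 * of distinct masks rather than the number of flows. A hypothetical
 * fast-path caller runs it under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	flow = ovs_flow_tbl_lookup(tbl, &key);
 *	if (flow)
 *		execute_actions(flow);	// placeholder for the real action
 *	rcu_read_unlock();
 */
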
static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = NULL;
	struct table_instance *new_ti = NULL;

	ti = ovsl_dereference(table->ti);

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti && !IS_ERR(new_ti)) {
		rcu_assign_pointer(table->ti, new_ti);
		/* Destroy the old instance, which 'ti' still points at;
		 * its flows survive because flow_table_copy_flows() set
		 * keep_flows. Destroying through table->ti here would
		 * free the instance we just installed.
		 */
		table_instance_destroy(ti, true);
		ti = ovsl_dereference(table->ti);
		table->last_rehash = jiffies;
	}

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	table_instance_insert(ti, flow);
	table->count++;
}

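/* Growth policy: the table doubles once the flow count exceeds the
 * bucket count (roughly load factor 1), and is otherwise rehashed at
 * the same size every REHASH_INTERVAL. Since each new instance draws
 * a fresh hash seed, the periodic rehash also limits how long any one
 * traffic pattern can keep a single bucket overloaded.
 */
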
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;
}

struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 0;

	return mask;
}

void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
{
	mask->ref_count++;
}

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

	kfree(mask);
}

void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	mask->ref_count--;

	if (!mask->ref_count) {
		list_del_rcu(&mask->list);
		if (deferred)
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
		else
			kfree(mask);
	}
}

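/* Masks are reference-counted because many flows typically share one
 * mask; that sharing is what keeps the mask list, and therefore the
 * per-packet lookup cost, short. Only the last
 * ovs_sw_flow_mask_del_ref() unlinks the mask from the table's list
 * and frees it.
 */
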
static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	u8 *a_ = (u8 *)&a->key + a->range.start;
	u8 *b_ = (u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

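/* Typical find-or-insert pattern at flow setup time (hypothetical
 * caller; 'new' holds the candidate mask):
 *
 *	struct sw_flow_mask *m = ovs_sw_flow_mask_find(tbl, new);
 *
 *	if (!m) {
 *		ovs_sw_flow_mask_insert(tbl, new);
 *		m = new;
 *	}
 *	ovs_sw_flow_mask_add_ref(m);
 */
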
/* Add a new mask into the mask list. The caller needs to make sure
 * that 'mask' is not the same as any of the masks already on the
 * list.
 */
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	list_add_rcu(&mask->list, &tbl->mask_list);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}