/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS         1024
#define REHASH_INTERVAL         (10 * 60 * HZ)

static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

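/* Applies 'mask' to the byte range of 'src' covered by 'mask->range' and
 * writes the result into the same range of 'dst', one long word at a time.
 * The BUILD_BUG_ON checks in ovs_flow_init() ensure that struct sw_flow_key
 * is sized and aligned for these word-sized strides.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *      struct sw_flow_key masked;
 *
 *      ovs_flow_mask_key(&masked, &unmasked, mask);
 */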
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       const struct sw_flow_mask *mask)
{
        const long *m = (long *)((u8 *)&mask->key + mask->range.start);
        const long *s = (long *)((u8 *)src + mask->range.start);
        long *d = (long *)((u8 *)dst + mask->range.start);
        int i;

        /* The memory outside of 'mask->range' is not set, since further
         * operations on 'dst' use only the contents within 'mask->range'.
         */
        for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
                *d++ = *s++ & *m++;
}

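/* Allocates a new flow from 'flow_cache'.  With 'percpu_stats' the stats
 * block is replicated for each possible CPU so the packet path can update
 * counters without cross-CPU lock contention; otherwise a single
 * spinlock-protected block is shared.  Returns an ERR_PTR() on failure,
 * so callers should check with IS_ERR() rather than for NULL.
 */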
struct sw_flow *ovs_flow_alloc(bool percpu_stats)
{
        struct sw_flow *flow;
        int cpu;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->sf_acts = NULL;
        flow->mask = NULL;

        flow->stats.is_percpu = percpu_stats;

        if (!percpu_stats) {
                flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
                if (!flow->stats.stat)
                        goto err;

                spin_lock_init(&flow->stats.stat->lock);
        } else {
                flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
                if (!flow->stats.cpu_stats)
                        goto err;

                for_each_possible_cpu(cpu) {
                        struct flow_stats *cpu_stats;

                        cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
                        spin_lock_init(&cpu_stats->lock);
                }
        }
        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
        return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void flow_free(struct sw_flow *flow)
{
        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        if (flow->stats.is_percpu)
                free_percpu(flow->stats.cpu_stats);
        else
                kfree(flow->stats.stat);
        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}

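/* Frees 'flow' and drops the reference it holds on its mask.  Masks are
 * shared between flows, so the mask itself is only freed once its
 * ref_count reaches zero.  With 'deferred' both the flow and, if this was
 * the last reference, the mask are freed only after an RCU grace period,
 * so concurrent RCU readers never see freed memory.
 */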
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (flow->mask) {
                struct sw_flow_mask *mask = flow->mask;

                /* The ovs lock is required to protect the mask ref_count
                 * and the mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count) {
                        list_del_rcu(&mask->list);
                        if (deferred)
                                kfree_rcu(mask, rcu);
                        else
                                kfree(mask);
                }
        }

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}


static void __table_instance_destroy(struct table_instance *ti)
{
        free_buckets(ti->buckets);
        kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

        if (!ti)
                return NULL;

        ti->buckets = alloc_buckets(new_size);

        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }
        ti->n_buckets = new_size;
        ti->node_ver = 0;
        ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);

        if (!ti)
                return -ENOMEM;

        rcu_assign_pointer(table->ti, ti);
        INIT_LIST_HEAD(&table->mask_list);
        table->last_rehash = jiffies;
        table->count = 0;
        return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

        __table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
        int i;

        if (!ti)
                return;

        if (ti->keep_flows)
                goto skip_flows;

        for (i = 0; i < ti->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(ti->buckets, i);
                struct hlist_node *n;
                int ver = ti->node_ver;

                hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow, deferred);
                }
        }

skip_flows:
        if (deferred)
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        else
                __table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        table_instance_destroy(ti, deferred);
}

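/* Returns the flow at cursor position ('bucket', 'last') and advances the
 * cursor, or NULL once every bucket has been walked.  Keeping the cursor
 * in the caller lets a flow dump be split across several calls (e.g. one
 * netlink message at a time) without holding a lock over the whole table.
 */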
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = flex_array_get(ti->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

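/* Maps 'hash' to a bucket.  Folding in the per-instance random hash_seed
 * makes each table instance spread flows differently; n_buckets is always
 * a power of two, so masking with (n_buckets - 1) picks the bucket.
 */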
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return flex_array_get(ti->buckets,
                              (hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

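/* Links every flow in 'old' into 'new'.  Each sw_flow embeds two
 * hash_node entries, indexed by node_ver, so a flow can be reachable from
 * both instances at once: RCU readers keep walking the old version's
 * lists while the new instance is populated.  Setting keep_flows tells
 * table_instance_destroy() not to free the flows that 'new' now owns.
 */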
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, head, hash_node[old_ver])
                        table_instance_insert(new, flow);
        }

        old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti);

        return new_ti;
}

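/* Empties the table by swapping in a fresh, minimum-sized instance.  The
 * old instance, and every flow in it, is torn down only after an RCU
 * grace period.
 */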
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti;
        struct table_instance *new_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;

        rcu_assign_pointer(flow_table->ti, new_ti);
        flow_table->last_rehash = jiffies;
        flow_table->count = 0;

        table_instance_destroy(old_ti, true);
        return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
                     int key_end)
{
        u32 *hash_key = (u32 *)((u8 *)key + key_start);
        int hash_u32s = (key_end - key_start) >> 2;

        /* Make sure the number of hash bytes is a multiple of u32. */
        BUILD_BUG_ON(sizeof(long) % sizeof(u32));

        return arch_fast_hash2(hash_key, hash_u32s, 0);
}

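/* Returns the byte offset at which hashing and comparison of 'key' should
 * start.  A zero tun_key.ipv4_dst means no tunnel is in use, in which case
 * the leading tunnel key can be skipped; the offset is rounded down to a
 * long boundary to match the word-at-a-time loops above.
 */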
static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_key.ipv4_dst)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (long *)((u8 *)key1 + key_start);
        const long *cp2 = (long *)((u8 *)key2 + key_start);
        long diffs = 0;
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;

        return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                int key_start, int key_end)
{
        return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          struct sw_flow_mask *mask)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int key_start = mask->range.start;
        int key_end = mask->range.end;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, mask);
        hash = flow_hash(&masked_key, key_start, key_end);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
                if (flow->mask == mask && flow->hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key,
                                        key_start, key_end))
                        return flow;
        }
        return NULL;
}

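/* Looks up 'key' by attempting a masked lookup for every mask on the
 * table's mask list until one hits.  '*n_mask_hit' reports how many masks
 * were tried, which callers use to account for the cost of the lookup.
 * Runs under RCU, so it is safe to call from the packet receive path.
 *
 * A hypothetical call site (for illustration only):
 *
 *      u32 n_mask_hit;
 *      struct sw_flow *flow;
 *
 *      rcu_read_lock();
 *      flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
 *      rcu_read_unlock();
 */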
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 *n_mask_hit)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;

        *n_mask_hit = 0;
        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
                (*n_mask_hit)++;
                flow = masked_flow_lookup(ti, key, mask);
                if (flow)  /* Found */
                        return flow;
        }
        return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        u32 __always_unused n_mask_hit;

        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct sw_flow_mask *mask;
        int num = 0;

        list_for_each_entry(mask, &table->mask_list, list)
                num++;

        return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
        return table_instance_rehash(ti, ti->n_buckets * 2);
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);

        BUG_ON(table->count == 0);
        hlist_del_rcu(&flow->hash_node[ti->node_ver]);
        table->count--;
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        u8 *a_ = (u8 *)&a->key + a->range.start;
        u8 *b_ = (u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end)
                && (a->range.start == b->range.start)
                && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct list_head *ml;

        list_for_each(ml, &tbl->mask_list) {
                struct sw_flow_mask *m;
                m = container_of(ml, struct sw_flow_mask, list);
                if (mask_equal(mask, m))
                        return m;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;
        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
                list_add_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}

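/* Inserts 'flow' into 'table', sharing an existing identical mask from
 * the mask list if one is found (see flow_mask_insert() above).  The flow
 * is hashed only over the bytes its mask covers.  The table instance is
 * doubled in size once there are more flows than buckets, and is
 * re-hashed with a fresh random seed every REHASH_INTERVAL, presumably to
 * keep accidental or crafted hash collisions from degrading lookups
 * permanently.
 */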
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        struct sw_flow_mask *mask)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;

        flow->hash = flow_hash(&flow->key, flow->mask->range.start,
                               flow->mask->range.end);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                table_instance_destroy(ti, true);
                table->last_rehash = jiffies;
        }
        return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                       0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}