/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>

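/* A cached flow: lives on a per-cpu hash chain while active, or on the
 * gc list once evicted (hence the union).  genid records the global
 * generation the entry was resolved in; a mismatch marks it stale.
 */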
struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

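/* Per-cpu slice of the cache.  Each cpu owns its hash table, entry
 * count and hash seed, so fast-path lookups need no cross-cpu locking.
 */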
struct flow_cache_percpu {
	struct hlist_head		*hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

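/* State for one synchronous flush: a countdown of cpus still flushing
 * and the completion the last cpu signals.
 */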
struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	struct flow_cache_percpu __percpu *percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

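/* Bumping flow_cache_genid invalidates every cached entry lazily: stale
 * entries fail the genid check in flow_entry_valid() and are reaped on
 * the next lookup, shrink or flush.
 */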
atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep __read_mostly;

static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

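/* Timer callback: ask every cpu to pick a fresh hash seed on its next
 * lookup, then re-arm for the next period.
 */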
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

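/* An entry is valid only if it matches the current generation and its
 * cached object (if any) still passes the owner's ->check() hook.
 */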
static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

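/* Free an entry, dropping its object through ->delete() first. */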
static void flow_entry_kill(struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
}

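/* Deferred reaping: splice the global gc list under the lock, then free
 * the dead entries outside it.
 */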
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&flow_cache_gc_lock);
	list_splice_tail_init(&flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce);
}
static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);

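/* Move the entries collected on @gc_list to the global gc list and kick
 * the workqueue; adjusts the per-cpu entry count to match.
 */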
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&flow_cache_gc_lock);
		list_splice_tail(gc_list, &flow_cache_gc_list);
		spin_unlock_bh(&flow_cache_gc_lock);
		schedule_work(&flow_cache_gc_work);
	}
}

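/* Walk every hash chain, keeping at most @shrink_to valid entries per
 * chain and queueing everything else for garbage collection.  A
 * shrink_to of 0 empties the table.
 */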
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

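/* Pick a new per-cpu hash seed.  The old hash values are now
 * meaningless, so the table is emptied rather than rehashed.
 */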
static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

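/* Hash the first @keysize words of the key with the per-cpu seed and
 * mask down to a bucket index.
 */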
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

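/* Look up @key in this cpu's cache, resolving a miss or stale entry
 * through @resolver.  Runs with BHs disabled, so the local table needs
 * no locking.  Returns the flow_cache_object (possibly NULL), or an
 * ERR_PTR() from the resolver.
 */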
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

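/* Per-cpu half of a flush: drop every invalid entry from the local
 * table, then signal completion if this was the last cpu.
 */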
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

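/* smp_call_function() callback: hand the walk off to the local flush
 * tasklet so it runs in softirq context rather than in the IPI handler.
 */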
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

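/* Flush all cpus synchronously.  Serialized by a local mutex and run
 * under get_online_cpus() so cpus cannot come or go mid-flush.
 */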
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

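/* flow_cache_flush() sleeps; callers in atomic context schedule the
 * flush onto a workqueue instead.
 */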
static void flow_cache_flush_task(struct work_struct *work)
{
	flow_cache_flush();
}

static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);

void flow_cache_flush_deferred(void)
{
	schedule_work(&flow_cache_flush_work);
}

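/* Allocate a cpu's hash table on its local NUMA node and initialise the
 * per-cpu state, once.
 */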
static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

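/* CPU hotplug notifier: allocate the table before a cpu comes up, and
 * drain its entries once it is dead.
 */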
static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
				    unsigned long action,
				    void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

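/* Bring up one cache: 2^10 buckets per cpu, watermarks scaled to the
 * table size, the hotplug notifier and the periodic reseed timer.
 */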
static int __init flow_cache_init(struct flow_cache *fc)
{
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}

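/* Create the entry slab and bring up the single global cache. */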
static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);