/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>

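/* A cached flow entry.  An entry lives on exactly one of two lists:
 * its per-cpu hash chain (u.hlist) while cached, or the global
 * garbage-collection list (u.gc_list) once evicted.
 */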
struct flow_cache_entry {
	union {
		struct hlist_node	hlist;
		struct list_head	gc_list;
	} u;
	struct net			*net;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

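/* Book-keeping for a cache-wide flush: one tasklet runs per cpu, and
 * the last cpu to finish completes the waiting flusher.
 */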
struct flow_flush_info {
	struct flow_cache	*cache;
	atomic_t		cpuleft;
	struct completion	completion;
};

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

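/* Timer callback: ask every cpu to pick a fresh hash seed on its next
 * lookup, then re-arm the timer for the next period.
 */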
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

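/* An entry is stale if the global generation counter has moved on
 * since it was resolved, or if its object no longer passes its own
 * ops->check().
 */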
static int flow_entry_valid(struct flow_cache_entry *fle,
				struct netns_xfrm *xfrm)
{
	if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

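/* Drop the entry's reference to its resolved object, if any, and free
 * the entry itself back to the per-netns slab cache.
 */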
static void flow_entry_kill(struct flow_cache_entry *fle,
				struct netns_xfrm *xfrm)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(xfrm->flow_cachep, fle);
}

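/* Deferred garbage collection: splice the pending list under the lock,
 * then kill the collected entries without holding any locks.
 */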
static void flow_cache_gc_task(struct work_struct *work)
{
	struct list_head gc_list;
	struct flow_cache_entry *fce, *n;
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_gc_work);

	INIT_LIST_HEAD(&gc_list);
	spin_lock_bh(&xfrm->flow_cache_gc_lock);
	list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
	spin_unlock_bh(&xfrm->flow_cache_gc_lock);

	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
		flow_entry_kill(fce, xfrm);
}

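/* Hand a batch of evicted entries to the gc worker and update the
 * per-cpu entry count.
 */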
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
				     int deleted, struct list_head *gc_list,
				     struct netns_xfrm *xfrm)
{
	if (deleted) {
		fcp->hash_count -= deleted;
		spin_lock_bh(&xfrm->flow_cache_gc_lock);
		list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
		spin_unlock_bh(&xfrm->flow_cache_gc_lock);
		schedule_work(&xfrm->flow_cache_gc_work);
	}
}

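/* Walk one cpu's hash table, keeping at most shrink_to still-valid
 * entries per chain and queueing everything else for gc.  With
 * shrink_to == 0 this empties the cpu's cache entirely.
 */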
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle, xfrm)) {
				saved++;
			} else {
				deleted++;
				hlist_del(&fle->u.hlist);
				list_add_tail(&fle->u.gc_list, &gc_list);
			}
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

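/* Trim this cpu's cache down toward the low watermark, keeping at most
 * low_watermark / hash_size valid entries per chain.
 */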
static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

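/* Install a new per-cpu hash seed.  All existing entries were hashed
 * with the old seed, so they must be flushed rather than rehashed.
 */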
static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

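/* Hash the flow key (an array of flow_compare_t words) into a chain
 * index, seeded with this cpu's random value.
 */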
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  const struct flowi *key,
			  size_t keysize)
{
	const u32 *k = (const u32 *) key;
	const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

	return jhash2(k, length, fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1);
}

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1, const struct flowi *key2,
			    size_t keysize)
{
	const flow_compare_t *k1, *k1_lim, *k2;

	k1 = (const flow_compare_t *) key1;
	k1_lim = k1 + keysize;

	k2 = (const flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

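/* Look up @key in this cpu's flow cache, resolving a miss or a stale
 * hit through @resolver.  Runs with bottom halves disabled.  On
 * resolver failure a freshly inserted entry keeps a stale genid so
 * the next lookup retries.  Returns the object, an ERR_PTR, or NULL.
 */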
struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &net->xfrm.flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, *tfle;
	struct flow_cache_object *flo;
	size_t keysize;
	unsigned int hash;

	local_bh_disable();
	fcp = this_cpu_ptr(fc->percpu);

	fle = NULL;
	flo = NULL;

	keysize = flow_key_size(family);
	if (!keysize)
		goto nocache;

	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key, keysize);
	hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
		if (tfle->net == net &&
		    tfle->family == family &&
		    tfle->dir == dir &&
		    flow_key_compare(key, &tfle->key, keysize) == 0) {
			fle = tfle;
			break;
		}
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(net->xfrm.flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->net = net;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
			fle->object = NULL;
			hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (!IS_ERR_OR_NULL(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}
EXPORT_SYMBOL(flow_cache_lookup);

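/* Per-cpu flush: drop every invalid entry on this cpu, then signal the
 * flusher when the last cpu finishes.
 */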
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle;
	struct hlist_node *tmp;
	LIST_HEAD(gc_list);
	int i, deleted = 0;
	struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
						flow_cache_global);

	fcp = this_cpu_ptr(fc->percpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		hlist_for_each_entry_safe(fle, tmp,
					  &fcp->hash_table[i], u.hlist) {
			if (flow_entry_valid(fle, xfrm))
				continue;

			deleted++;
			hlist_del(&fle->u.hlist);
			list_add_tail(&fle->u.gc_list, &gc_list);
		}
	}

	flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, cpu);
	for (i = 0; i < flow_cache_hash_size(fc); i++)
		if (!hlist_empty(&fcp->hash_table[i]))
			return 0;
	return 1;
}

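/* Cross-cpu callback: schedule this cpu's flush tasklet with the
 * shared flush info as its argument.
 */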
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	struct tasklet_struct *tasklet;

	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

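/* Synchronously flush this namespace's flow cache: find the cpus that
 * actually hold entries, run the flush tasklet on each of them, and
 * wait until they have all finished.
 */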
void flow_cache_flush(struct net *net)
{
	struct flow_flush_info info;
	cpumask_var_t mask;
	int i, self;

	/* Track which cpus need flushing to avoid disturbing all cores. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&net->xfrm.flow_flush_sem);
	info.cache = &net->xfrm.flow_cache_global;
	for_each_online_cpu(i)
		if (!flow_cache_percpu_empty(info.cache, i))
			cpumask_set_cpu(i, mask);
	atomic_set(&info.cpuleft, cpumask_weight(mask));
	if (atomic_read(&info.cpuleft) == 0)
		goto done;

	init_completion(&info.completion);

	local_bh_disable();
	self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
	on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
	if (self)
		flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);

done:
	mutex_unlock(&net->xfrm.flow_flush_sem);
	put_online_cpus();
	free_cpumask_var(mask);
}

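/* Work handler for flow_cache_flush_deferred().  The work item is
 * net->xfrm.flow_cache_flush_work, so container_of() must resolve
 * against that member to recover the owning namespace.
 */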
static void flow_cache_flush_task(struct work_struct *work)
{
	struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
						flow_cache_flush_work);
	struct net *net = container_of(xfrm, struct net, xfrm);

	flow_cache_flush(net);
}

void flow_cache_flush_deferred(struct net *net)
{
	schedule_work(&net->xfrm.flow_cache_flush_work);
}

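/* Allocate and initialise one cpu's hash table on first use (boot or
 * cpu hotplug).  Idempotent: a table that already exists is kept.
 */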
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
	size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

	if (!fcp->hash_table) {
		fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
		if (!fcp->hash_table) {
			pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
			return -ENOMEM;
		}
		fcp->hash_rnd_recalc = 1;
		fcp->hash_count = 0;
		tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
	}
	return 0;
}

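/* CPU hotplug notifier: set up the hash table before a cpu comes up,
 * and empty its cache once it is dead.
 */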
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache,
						hotcpu_notifier);
	int res, cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		res = flow_cache_cpu_prepare(fc, cpu);
		if (res)
			return notifier_from_errno(res);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		__flow_cache_shrink(fc, fcp, 0);
		break;
	}
	return NOTIFY_OK;
}

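/* Per-namespace initialisation: create the entry slab, the gc and
 * flush work items, the per-cpu hash tables, the hotplug notifier and
 * the periodic hash-seed timer.
 */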
int flow_cache_init(struct net *net)
{
	int i;
	struct flow_cache *fc = &net->xfrm.flow_cache_global;

	/* Initialize per-net flow cache global variables here */
	net->xfrm.flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);
	spin_lock_init(&net->xfrm.flow_cache_gc_lock);
	INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
	INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
	INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
	mutex_init(&net->xfrm.flow_flush_sem);

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	fc->percpu = alloc_percpu(struct flow_cache_percpu);
	if (!fc->percpu)
		return -ENOMEM;

	for_each_online_cpu(i) {
		if (flow_cache_cpu_prepare(fc, i))
			goto err;
	}
	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	return 0;

err:
	for_each_possible_cpu(i) {
		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
		kfree(fcp->hash_table);
		fcp->hash_table = NULL;
	}

	free_percpu(fc->percpu);
	fc->percpu = NULL;

	return -ENOMEM;
}
EXPORT_SYMBOL(flow_cache_init);