/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>
struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	u32			genid;
	struct flowi		key;
	void			*object;
	atomic_t		*object_ref;
};

struct flow_cache_percpu {
	struct flow_cache_entry **	hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache *		cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	unsigned long			order;
	struct flow_cache_percpu *	percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep;

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
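/* Design overview: the cache is a set of per-CPU, open-chained hash
 * tables of flow_cache_entry.  Each entry caches the object returned
 * by a resolver for one flowi key together with the generation number
 * (flow_cache_genid) current at resolution time; entries whose genid
 * no longer matches are stale.  A timer marks every CPU's hash seed
 * for renewal each FLOW_HASH_RND_PERIOD; the seed is regenerated
 * lazily on that CPU's next lookup, which also empties its table.
 * low_watermark/high_watermark bound the number of entries per CPU.
 */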
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static void flow_entry_kill(struct flow_cache *fc,
			    struct flow_cache_percpu *fcp,
			    struct flow_cache_entry *fle)
{
	if (fle->object)
		atomic_dec(fle->object_ref);
	kmem_cache_free(flow_cachep, fle);
	fcp->hash_count--;
}
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int k = 0;

		flp = &fcp->hash_table[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			flow_entry_kill(fc, fcp, fle);
		}
	}
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}
static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  struct flowi *key)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif
/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}
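/* Look up the object cached for @key on the local CPU, calling
 * @resolver to (re)create it on a miss or when the cached entry's
 * genid is stale.  Runs with bottom halves disabled while it touches
 * the per-CPU table, so no further locking is needed.  On success the
 * object comes back with a reference held for the caller, which must
 * be dropped when done; if the resolver fails, an ERR_PTR() value is
 * returned instead.
 */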
void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, **head;
	unsigned int hash;

	local_bh_disable();
	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);
	hash = flow_hash_code(fc, fcp, key);

	head = &fcp->hash_table[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

	if (!fle) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			fcp->hash_count++;
		}
	}

nocache:
	{
		int err;
		void *obj;
		atomic_t *obj_ref;

		err = resolver(net, key, family, dir, &obj, &obj_ref);

		if (fle && !err) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		if (err)
			obj = ERR_PTR(err);
		return obj;
	}
}
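/* Cache invalidation is two-staged: callers that want to flush bump
 * flow_cache_genid, which makes every existing entry stale, and then
 * call flow_cache_flush().  The flush kicks a tasklet on each online
 * CPU (via smp_call_function() plus a direct call on the local CPU)
 * that walks that CPU's table and drops the references still held by
 * stale entries, signalling completion once every CPU has finished.
 */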
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		struct flow_cache_entry *fle;

		fle = fcp->hash_table[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}
static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();
	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}
static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
					  struct flow_cache_percpu *fcp)
{
	fcp->hash_table = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
	if (!fcp->hash_table)
		panic("NET: failed to allocate flow cache order %lu\n", fc->order);

	fcp->hash_rnd_recalc = 1;
	fcp->hash_count = 0;
	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}
static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink(fc, fcp, 0);
	return NOTIFY_OK;
}
static int flow_cache_init(struct flow_cache *fc)
{
	unsigned long order;
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_cache_hash_size(fc));
	     order++)
		/* NOTHING */;
	fc->order = order;
	fc->percpu = alloc_percpu(struct flow_cache_percpu);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));

	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	return 0;
}
static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);
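The contract between flow_cache_lookup() and its resolver is easiest to see from a caller's side. The sketch below is a minimal, hypothetical user of this API and is not taken from the kernel tree (the in-tree caller is the xfrm/IPsec policy lookup); my_object, my_resolver and my_lookup are invented names, and the resolver is assumed to follow the flow_resolve_t signature implied by the resolver() call above.

```c
#include <linux/err.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/flow.h>

struct my_object {
	atomic_t	refcnt;
	/* ... whatever state was resolved for this flow ... */
};

/* Called by flow_cache_lookup() on a miss or on a stale entry, with
 * BHs disabled (hence GFP_ATOMIC).  It hands back the object with one
 * reference held for the caller; the cache takes its own extra
 * reference on *obj_refp for the entry it keeps. */
static int my_resolver(struct net *net, struct flowi *key, u16 family,
		       u8 dir, void **objp, atomic_t **obj_refp)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

	if (!obj)
		return -ENOBUFS;
	atomic_set(&obj->refcnt, 1);
	*objp = obj;
	*obj_refp = &obj->refcnt;
	return 0;
}

static struct my_object *my_lookup(struct net *net, struct flowi *fl,
				   u16 family, u8 dir)
{
	void *obj = flow_cache_lookup(net, fl, family, dir, my_resolver);

	if (IS_ERR(obj))
		return NULL;	/* resolver returned an error */
	return obj;		/* drop the reference (atomic_dec on refcnt) when done */
}
```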