/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
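
/*
 * Typical entry usage, given an existing cache (an illustrative sketch
 * only; real callers such as the filesystems' extended attribute code
 * differ in detail, and "my_key" is a made-up name here):
 *
 *      ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *      if (ce) {
 *              if (mb_cache_entry_insert(ce, bdev, block, my_key))
 *                      ... an entry for that block already exists ...
 *              mb_cache_entry_release(ce);
 *      }
 *
 *      ce = mb_cache_entry_get(cache, bdev, block);
 *      if (ce) {
 *              ... use the entry exclusively ...
 *              mb_cache_entry_release(ce);
 *      }
 */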

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
                printk(KERN_DEBUG f); \
                printk("\n"); \
        } while (0)
#define mb_assert(c) do { if (!(c)) \
                printk(KERN_ERR "assertion " #c " failed\n"); \
        } while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
                printk(KERN_ERR f); \
                printk("\n"); \
        } while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
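
/*
 * Handle accounting, as implemented below: e_used counts the handles that
 * currently refer to an entry.  A shared (reader) handle adds 1, an
 * exclusive (writer) handle adds MB_CACHE_WRITER + 1, so any e_used value
 * of at least MB_CACHE_WRITER means a writer holds the entry.  e_queued
 * counts tasks sleeping on mb_cache_queue while waiting for the entry.
 */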

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

struct mb_cache {
        struct list_head                c_cache_list;
        const char                      *c_name;
        atomic_t                        c_entry_count;
        int                             c_bucket_bits;
        struct kmem_cache               *c_entry_cache;
        struct list_head                *c_block_hash;
        struct list_head                *c_index_hash;
};


/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

/*
 * The shrinker callback that the mbcache registers with the kernel, so
 * that cache entries can be reclaimed dynamically under memory pressure.
 */

static int mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
        .shrink = mb_cache_shrink_fn,
        .seeks = DEFAULT_SEEKS,
};

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
        return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
        if (__mb_cache_entry_is_hashed(ce)) {
                list_del_init(&ce->e_block_list);
                list_del(&ce->e_index.o_list);
        }
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
        struct mb_cache *cache = ce->e_cache;

        mb_assert(!(ce->e_used || ce->e_queued));
        kmem_cache_free(cache->c_entry_cache, ce);
        atomic_dec(&cache->c_entry_count);
}


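/*
 * __mb_cache_entry_release_unlock()
 *
 * Drop one handle to the entry (including the writer bias, if it was held
 * exclusively), wake up any tasks waiting for it, and then either put the
 * entry back on the global LRU list (if it is still hashed and now unused)
 * or free it (if it has been unhashed).  Called with mb_cache_spinlock
 * held; the lock is dropped before returning.
 */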
static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
        __releases(mb_cache_spinlock)
{
        /* Wake up all processes queuing for this cache entry. */
        if (ce->e_queued)
                wake_up_all(&mb_cache_queue);
        if (ce->e_used >= MB_CACHE_WRITER)
                ce->e_used -= MB_CACHE_WRITER;
        ce->e_used--;
        if (!(ce->e_used || ce->e_queued)) {
                if (!__mb_cache_entry_is_hashed(ce))
                        goto forget;
                mb_assert(list_empty(&ce->e_lru_list));
                list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
        }
        spin_unlock(&mb_cache_spinlock);
        return;
forget:
        spin_unlock(&mb_cache_spinlock);
        __mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of entries in all mbcaches, scaled by
 * sysctl_vfs_cache_pressure (i.e. count / 100 * sysctl_vfs_cache_pressure).
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
        LIST_HEAD(free_list);
        struct mb_cache *cache;
        struct mb_cache_entry *entry, *tmp;
        int count = 0;

        mb_debug("trying to free %d entries", nr_to_scan);
        spin_lock(&mb_cache_spinlock);
        while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
                struct mb_cache_entry *ce =
                        list_entry(mb_cache_lru_list.next,
                                   struct mb_cache_entry, e_lru_list);
                list_move_tail(&ce->e_lru_list, &free_list);
                __mb_cache_entry_unhash(ce);
        }
        list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
                mb_debug("cache %s (%d)", cache->c_name,
                         atomic_read(&cache->c_entry_count));
                count += atomic_read(&cache->c_entry_count);
        }
        spin_unlock(&mb_cache_spinlock);
        list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
                __mb_cache_entry_forget(entry, gfp_mask);
        }
        return (count / 100) * sysctl_vfs_cache_pressure;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. The new cache is added to the global list of caches
 * from which the registered shrinker (see init_mbcache()) reclaims entries
 * under memory pressure. Returns NULL if no more memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
        int n, bucket_count = 1 << bucket_bits;
        struct mb_cache *cache = NULL;

        cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
        if (!cache)
                return NULL;
        cache->c_name = name;
        atomic_set(&cache->c_entry_count, 0);
        cache->c_bucket_bits = bucket_bits;
        cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!cache->c_block_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
                INIT_LIST_HEAD(&cache->c_block_hash[n]);
        cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
                                      GFP_KERNEL);
        if (!cache->c_index_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
                INIT_LIST_HEAD(&cache->c_index_hash[n]);
        cache->c_entry_cache = kmem_cache_create(name,
                sizeof(struct mb_cache_entry), 0,
                SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
        if (!cache->c_entry_cache)
                goto fail2;

        spin_lock(&mb_cache_spinlock);
        list_add(&cache->c_cache_list, &mb_cache_list);
        spin_unlock(&mb_cache_spinlock);
        return cache;

fail2:
        kfree(cache->c_index_hash);

fail:
        kfree(cache->c_block_hash);
        kfree(cache);
        return NULL;
}
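
/*
 * Example (illustrative only; the cache name and bucket count are made up):
 *
 *      struct mb_cache *cache = mb_cache_create("frob_cache", 6);
 *
 *      if (!cache)
 *              return -ENOMEM;
 *      ...
 *      mb_cache_destroy(cache);
 */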


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;

        spin_lock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_lru_list);
                if (ce->e_bdev == bdev) {
                        list_move_tail(&ce->e_lru_list, &free_list);
                        __mb_cache_entry_unhash(ce);
                }
        }
        spin_unlock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), GFP_KERNEL);
        }
}
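
/*
 * Entries of the device that are still referenced are not on the LRU list,
 * so mb_cache_shrink() leaves them hashed; they only become reclaimable
 * once their last handle is released and they are put back on the LRU.
 */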


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. The cache is removed from the global cache list;
 * the shrinker itself stays registered until module unload
 * (see exit_mbcache()).
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
        LIST_HEAD(free_list);
        struct list_head *l, *ltmp;

        spin_lock(&mb_cache_spinlock);
        list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_lru_list);
                if (ce->e_cache == cache) {
                        list_move_tail(&ce->e_lru_list, &free_list);
                        __mb_cache_entry_unhash(ce);
                }
        }
        list_del(&cache->c_cache_list);
        spin_unlock(&mb_cache_spinlock);

        list_for_each_safe(l, ltmp, &free_list) {
                __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
                                                   e_lru_list), GFP_KERNEL);
        }

        if (atomic_read(&cache->c_entry_count) > 0) {
                mb_error("cache %s: %d orphaned entries",
                         cache->c_name,
                         atomic_read(&cache->c_entry_count));
        }

        kmem_cache_destroy(cache->c_entry_cache);

        kfree(cache->c_index_hash);
        kfree(cache->c_block_hash);
        kfree(cache);
}


/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
        struct mb_cache_entry *ce;

        ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
        if (ce) {
                atomic_inc(&cache->c_entry_count);
                INIT_LIST_HEAD(&ce->e_lru_list);
                INIT_LIST_HEAD(&ce->e_block_list);
                ce->e_cache = cache;
                ce->e_used = 1 + MB_CACHE_WRITER;
                ce->e_queued = 0;
        }
        return ce;
}
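
/*
 * Note that a successfully allocated entry already carries an exclusive
 * reference (e_used is set to 1 + MB_CACHE_WRITER above), so the caller
 * must drop it with mb_cache_entry_release() or mb_cache_entry_free()
 * even if a subsequent mb_cache_entry_insert() fails.
 */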


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
                      sector_t block, unsigned int key)
{
        struct mb_cache *cache = ce->e_cache;
        unsigned int bucket;
        struct list_head *l;
        int error = -EBUSY;

        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
        spin_lock(&mb_cache_spinlock);
        list_for_each_prev(l, &cache->c_block_hash[bucket]) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_block_list);
                if (ce->e_bdev == bdev && ce->e_block == block)
                        goto out;
        }
        __mb_cache_entry_unhash(ce);
        ce->e_bdev = bdev;
        ce->e_block = block;
        list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
        ce->e_index.o_key = key;
        bucket = hash_long(key, cache->c_bucket_bits);
        list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
        error = 0;
out:
        spin_unlock(&mb_cache_spinlock);
        return error;
}
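
/*
 * If mb_cache_entry_insert() returns -EBUSY, another process has already
 * inserted an entry for the same device and block.  The caller still holds
 * its own entry and typically just releases it and looks up the existing
 * one instead.
 */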


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        __mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Unhashes the entry (making it invalid) and releases the handle to it;
 * the entry will be freed as soon as its last handle has been dropped.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
        spin_lock(&mb_cache_spinlock);
        mb_assert(list_empty(&ce->e_lru_list));
        __mb_cache_entry_unhash(ce);
        __mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
                   sector_t block)
{
        unsigned int bucket;
        struct list_head *l;
        struct mb_cache_entry *ce;

        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
        spin_lock(&mb_cache_spinlock);
        list_for_each(l, &cache->c_block_hash[bucket]) {
                ce = list_entry(l, struct mb_cache_entry, e_block_list);
                if (ce->e_bdev == bdev && ce->e_block == block) {
                        DEFINE_WAIT(wait);

                        if (!list_empty(&ce->e_lru_list))
                                list_del_init(&ce->e_lru_list);

                        while (ce->e_used > 0) {
                                ce->e_queued++;
                                prepare_to_wait(&mb_cache_queue, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                spin_unlock(&mb_cache_spinlock);
                                schedule();
                                spin_lock(&mb_cache_spinlock);
                                ce->e_queued--;
                        }
                        finish_wait(&mb_cache_queue, &wait);
                        ce->e_used += 1 + MB_CACHE_WRITER;

                        if (!__mb_cache_entry_is_hashed(ce)) {
                                __mb_cache_entry_release_unlock(ce);
                                return NULL;
                        }
                        goto cleanup;
                }
        }
        ce = NULL;

cleanup:
        spin_unlock(&mb_cache_spinlock);
        return ce;
}
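
/*
 * Note that mb_cache_entry_get() may sleep: if the entry is currently in
 * use, the calling task waits on mb_cache_queue until all other handles
 * have been released.  The entry is returned with the exclusive (writer)
 * reference already taken.
 */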

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
                      struct block_device *bdev, unsigned int key)
{
        while (l != head) {
                struct mb_cache_entry *ce =
                        list_entry(l, struct mb_cache_entry, e_index.o_list);
                if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
                        DEFINE_WAIT(wait);

                        if (!list_empty(&ce->e_lru_list))
                                list_del_init(&ce->e_lru_list);

                        /* Incrementing before holding the lock gives readers
                           priority over writers. */
                        ce->e_used++;
                        while (ce->e_used >= MB_CACHE_WRITER) {
                                ce->e_queued++;
                                prepare_to_wait(&mb_cache_queue, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                spin_unlock(&mb_cache_spinlock);
                                schedule();
                                spin_lock(&mb_cache_spinlock);
                                ce->e_queued--;
                        }
                        finish_wait(&mb_cache_queue, &wait);

                        if (!__mb_cache_entry_is_hashed(ce)) {
                                __mb_cache_entry_release_unlock(ce);
                                spin_lock(&mb_cache_spinlock);
                                return ERR_PTR(-EAGAIN);
                        }
                        return ce;
                }
                l = l->next;
        }
        return NULL;
}
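
/*
 * Note for callers of mb_cache_entry_find_first()/_find_next(): besides a
 * matching entry or NULL, __mb_cache_entry_find() can also return
 * ERR_PTR(-EAGAIN) when an entry was invalidated while we slept on it, so
 * results should be checked with IS_ERR() before use.
 */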


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
                          unsigned int key)
{
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
        struct list_head *l;
        struct mb_cache_entry *ce;

        spin_lock(&mb_cache_spinlock);
        l = cache->c_index_hash[bucket].next;
        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
        spin_unlock(&mb_cache_spinlock);
        return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *      ...
 *      entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
                         struct block_device *bdev, unsigned int key)
{
        struct mb_cache *cache = prev->e_cache;
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
        struct list_head *l;
        struct mb_cache_entry *ce;

        spin_lock(&mb_cache_spinlock);
        l = prev->e_index.o_list.next;
        ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
        __mb_cache_entry_release_unlock(prev);
        return ce;
}

#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
        register_shrinker(&mb_cache_shrinker);
        return 0;
}

static void __exit exit_mbcache(void)
{
        unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)
