fs/mbcache.c: decouple the locking of local from global data

/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
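
/*
 * Illustrative usage sketch (the names and values below are examples, not
 * part of this file): a filesystem typically creates one cache and then,
 * for each block it wants cached, allocates an entry, inserts it, and
 * drops its handle so the entry can age on the lru list:
 *
 *	struct mb_cache *cache = mb_cache_create("demo_cache", 6);
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (ce) {
 *		if (mb_cache_entry_insert(ce, bdev, block, key) == 0)
 *			mb_cache_entry_release(ce);
 *		else
 *			mb_cache_entry_free(ce);
 *	}
 *
 * Here bdev, block and key are assumed to be supplied by the caller.
 */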

/*
 * Lock descriptions and usage:
 *
 * Each hash chain of both the block and index hash tables now contains
 * a built-in lock used to serialize accesses to the hash chain.
 *
 * Accesses to global data structures mb_cache_list and mb_cache_lru_list
 * are serialized via the global spinlock mb_cache_spinlock.
 *
 * Each mb_cache_entry contains a spinlock, e_entry_lock, to serialize
 * accesses to its local data, such as e_used and e_queued.
 *
 * Lock ordering:
 *
 * Each block hash chain's lock has the highest lock order, followed by an
 * index hash chain's lock, mb_cache_bg_lock (used to implement
 * mb_cache_entry's lock), and mb_cache_spinlock, with the lowest order.
 * While holding either a block or index hash chain lock, a thread can
 * acquire an mb_cache_bg_lock, which in turn can also acquire
 * mb_cache_spinlock.
 *
 * Synchronization:
 *
 * Since both mb_cache_entry_get and mb_cache_entry_find scan the block and
 * index hash chains, they need to lock the corresponding hash chain. For
 * each mb_cache_entry within the chain, they also need to lock the
 * mb_cache_entry itself, to prevent any simultaneous release or free of
 * the entry and to serialize accesses to its e_used and e_queued members.
 *
 * To avoid having a dangling reference to an already freed
 * mb_cache_entry, an mb_cache_entry is only freed when it is not on a
 * block hash chain and is no longer being referenced, i.e. both e_used
 * and e_queued are 0. When an mb_cache_entry is explicitly freed it is
 * first removed from a block hash chain.
 */
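
/*
 * Illustration of the ordering above (example only): the documented order
 * permits nesting like
 *
 *	hlist_bl_lock(ce->e_block_hash_p);	(block hash chain lock)
 *	__spin_lock_mb_cache_entry(ce);		(mb_cache_bg_lock)
 *	spin_lock(&mb_cache_spinlock);		(global list/lru lock)
 *	...
 *	spin_unlock(&mb_cache_spinlock);
 *	__spin_unlock_mb_cache_entry(ce);
 *	hlist_bl_unlock(ce->e_block_hash_p);
 *
 * but never taking a hash chain lock while already holding
 * mb_cache_spinlock or an entry's lock.
 */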

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_bl.h>
#include <linux/mbcache.h>
#include <linux/init.h>
#include <linux/blockgroup_lock.h>

#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

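/*
 * e_used counts read references; a writer additionally adds MB_CACHE_WRITER,
 * so e_used >= MB_CACHE_WRITER means the entry is held for exclusive access.
 */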
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

#define MB_CACHE_ENTRY_LOCK_BITS	__builtin_log2(NR_BG_LOCKS)
#define	MB_CACHE_ENTRY_LOCK_INDEX(ce)			\
	(hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))

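/*
 * For illustration: the per-entry lock is taken by hashing the entry's
 * address into one of the NR_BG_LOCKS spinlocks in *mb_cache_bg_lock, e.g.
 *
 *	spin_lock(bgl_lock_ptr(mb_cache_bg_lock,
 *			       MB_CACHE_ENTRY_LOCK_INDEX(ce)));
 *
 * so unrelated entries that hash to the same index share a spinlock.
 */
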
static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
static struct blockgroup_lock *mb_cache_bg_lock;

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

/*
 * Global data: list of all mbcaches, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline void
__spin_lock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_lock(bgl_lock_ptr(mb_cache_bg_lock,
			       MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline void
__spin_unlock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_unlock(bgl_lock_ptr(mb_cache_bg_lock,
				 MB_CACHE_ENTRY_LOCK_INDEX(ce)));
}

static inline int
__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_block_list);
}


static inline void
__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_block_hashed(ce))
		hlist_bl_del_init(&ce->e_block_list);
}

static inline int
__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_index.o_list);
}

static inline void
__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_index_hashed(ce))
		hlist_bl_del_init(&ce->e_index.o_list);
}

/*
 * __mb_cache_entry_unhash_unlock()
 *
 * This function is called to unhash the entry from both the block and
 * index hash chains.
 * It assumes both the block and index hash chains are locked upon entry.
 * It also unlocks both hash chains upon exit.
 */
static inline void
__mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce)
{
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
}

static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}

static void
__mb_cache_entry_release(struct mb_cache_entry *ce)
{
	/* First lock the entry to serialize access to its local data. */
	__spin_lock_mb_cache_entry(ce);
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	/*
	 * Make sure that all cache entries on lru_list have
	 * both e_used and e_queued equal to 0.
	 */
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) {
		if (!__mb_cache_entry_is_block_hashed(ce)) {
			__spin_unlock_mb_cache_entry(ce);
			goto forget;
		}
		/*
		 * Need access to lru list, first drop entry lock,
		 * then reacquire the lock in the proper order.
		 */
		spin_lock(&mb_cache_spinlock);
		if (list_empty(&ce->e_lru_list))
			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	}
	__spin_unlock_mb_cache_entry(ce);
	return;
forget:
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}

/*
 * mb_cache_shrink_scan()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	unsigned long freed = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while ((nr_to_scan-- > 0) && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			continue;
		spin_unlock(&mb_cache_spinlock);
		/* Prevent any find or get operation on the entry */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		/* Ignore if it is touched by a find/get */
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
			spin_lock(&mb_cache_spinlock);
			continue;
		}
		__mb_cache_entry_unhash_unlock(ce);
		list_add_tail(&ce->e_lru_list, &free_list);
		spin_lock(&mb_cache_spinlock);
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
		freed++;
	}
	return freed;
}

static unsigned long
mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct mb_cache *cache;
	unsigned long count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);

	return vfs_pressure_ratio(count);
}

static struct shrinker mb_cache_shrinker = {
	.count_objects = mb_cache_shrink_count,
	.scan_objects = mb_cache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (!mb_cache_bg_lock) {
		mb_cache_bg_lock = kmalloc(sizeof(struct blockgroup_lock),
			GFP_KERNEL);
		if (!mb_cache_bg_lock)
			return NULL;
		bgl_lock_init(mb_cache_bg_lock);
	}

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count *
		sizeof(struct hlist_bl_head), GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l;
	struct mb_cache_entry *ce, *tmp;

	l = &mb_cache_lru_list;
	spin_lock(&mb_cache_spinlock);
	while (!list_is_last(l, &mb_cache_lru_list)) {
		l = l->next;
		ce = list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_del_init(&ce->e_lru_list);
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt))
				continue;
			spin_unlock(&mb_cache_spinlock);
			/*
			 * Prevent any find or get operation on the entry.
			 */
			hlist_bl_lock(ce->e_block_hash_p);
			hlist_bl_lock(ce->e_index_hash_p);
			/* Ignore if it is touched by a find/get */
			if (ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt) ||
			    !list_empty(&ce->e_lru_list)) {
				hlist_bl_unlock(ce->e_index_hash_p);
				hlist_bl_unlock(ce->e_block_hash_p);
				l = &mb_cache_lru_list;
				spin_lock(&mb_cache_spinlock);
				continue;
			}
			__mb_cache_entry_unhash_unlock(ce);
			mb_assert(!(ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt)));
			list_add_tail(&ce->e_lru_list, &free_list);
			l = &mb_cache_lru_list;
			spin_lock(&mb_cache_spinlock);
		}
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}
}


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *ce, *tmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_cache == cache)
			list_move_tail(&ce->e_lru_list, &free_list);
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		list_del_init(&ce->e_lru_list);
		/*
		 * Prevent any find or get operation on the entry.
		 */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		mb_assert(!(ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt)));
		__mb_cache_entry_unhash_unlock(ce);
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		struct list_head *l;

		l = &mb_cache_lru_list;
		spin_lock(&mb_cache_spinlock);
		while (!list_is_last(l, &mb_cache_lru_list)) {
			l = l->next;
			ce = list_entry(l, struct mb_cache_entry, e_lru_list);
			if (ce->e_cache == cache) {
				list_del_init(&ce->e_lru_list);
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt))
					continue;
				spin_unlock(&mb_cache_spinlock);
				/*
				 * Prevent any find or get operation on the
				 * entry.
				 */
				hlist_bl_lock(ce->e_block_hash_p);
				hlist_bl_lock(ce->e_index_hash_p);
				/* Ignore if it is touched by a find/get */
				if (ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt) ||
				    !list_empty(&ce->e_lru_list)) {
					hlist_bl_unlock(ce->e_index_hash_p);
					hlist_bl_unlock(ce->e_block_hash_p);
					l = &mb_cache_lru_list;
					spin_lock(&mb_cache_spinlock);
					continue;
				}
				mb_assert(list_empty(&ce->e_lru_list));
				mb_assert(!(ce->e_used || ce->e_queued ||
					    atomic_read(&ce->e_refcnt)));
				__mb_cache_entry_unhash_unlock(ce);
				goto found;
			}
		}
		spin_unlock(&mb_cache_spinlock);
	}

	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (!ce)
		return NULL;
	atomic_inc(&cache->c_entry_count);
	INIT_LIST_HEAD(&ce->e_lru_list);
	INIT_HLIST_BL_NODE(&ce->e_block_list);
	INIT_HLIST_BL_NODE(&ce->e_index.o_list);
	ce->e_cache = cache;
	ce->e_queued = 0;
	atomic_set(&ce->e_refcnt, 0);
found:
	ce->e_block_hash_p = &cache->c_block_hash[0];
	ce->e_index_hash_p = &cache->c_index_hash[0];
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct hlist_bl_node *l;
	struct hlist_bl_head *block_hash_p;
	struct hlist_bl_head *index_hash_p;
	struct mb_cache_entry *lce;

	mb_assert(ce);
	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
		if (lce->e_bdev == bdev && lce->e_block == block) {
			hlist_bl_unlock(block_hash_p);
			return -EBUSY;
		}
	}
	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
	__mb_cache_entry_unhash_block(ce);
	__mb_cache_entry_unhash_index(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	ce->e_block_hash_p = block_hash_p;
	ce->e_index.o_key = key;
	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
	hlist_bl_unlock(block_hash_p);
	bucket = hash_long(key, cache->c_bucket_bits);
	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	ce->e_index_hash_p = index_hash_p;
	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
	hlist_bl_unlock(index_hash_p);
	return 0;
}


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	__mb_cache_entry_release(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Invalidates a cache entry: it is removed from both the block and index
 * hash chains (so it can no longer be looked up) and the caller's handle
 * is released.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	mb_assert(ce);
	mb_assert(list_empty(&ce->e_lru_list));
	hlist_bl_lock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_lock(ce->e_block_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
	__mb_cache_entry_release(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *block_hash_p;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	/* First serialize access to the block's corresponding hash chain. */
	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
		mb_assert(ce->e_block_hash_p == block_hash_p);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(block_hash_p);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			if (ce->e_used > 0) {
				DEFINE_WAIT(wait);
				while (ce->e_used > 0) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			ce->e_used += 1 + MB_CACHE_WRITER;
			__spin_unlock_mb_cache_entry(ce);

			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return NULL;
			}
			return ce;
		}
	}
	hlist_bl_unlock(block_hash_p);
	return NULL;
}

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
		      struct block_device *bdev, unsigned int key)
{

	/* The index hash chain is already locked by the caller. */
	while (l != NULL) {
		struct mb_cache_entry *ce =
			hlist_bl_entry(l, struct mb_cache_entry,
				       e_index.o_list);
		mb_assert(ce->e_index_hash_p == head);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			/*
			 * Prevent a free from removing the entry.
			 */
			atomic_inc(&ce->e_refcnt);
			hlist_bl_unlock(head);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			ce->e_used++;
			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			if (ce->e_used >= MB_CACHE_WRITER) {
				DEFINE_WAIT(wait);

				while (ce->e_used >= MB_CACHE_WRITER) {
					ce->e_queued++;
					prepare_to_wait(&mb_cache_queue, &wait,
							TASK_UNINTERRUPTIBLE);
					__spin_unlock_mb_cache_entry(ce);
					schedule();
					__spin_lock_mb_cache_entry(ce);
					ce->e_queued--;
				}
				finish_wait(&mb_cache_queue, &wait);
			}
			__spin_unlock_mb_cache_entry(ce);
			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				__mb_cache_entry_release(ce);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	hlist_bl_unlock(head);
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce = NULL;
	struct hlist_bl_head *index_hash_p;

	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	if (!hlist_bl_empty(index_hash_p)) {
		l = hlist_bl_first(index_hash_p);
		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	} else
		hlist_bl_unlock(index_hash_p);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;
	struct hlist_bl_head *index_hash_p;

	index_hash_p = &cache->c_index_hash[bucket];
	mb_assert(prev->e_index_hash_p == index_hash_p);
	hlist_bl_lock(index_hash_p);
	mb_assert(!hlist_bl_empty(index_hash_p));
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	__mb_cache_entry_release(prev);
	return ce;
}

#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)