/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"
/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
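/*
 * Illustrative example (not used by the code, numbers are assumptions):
 * on a 32-bit machine with 4 GiB of low memory and a 128 MiB vmalloc
 * arena, the default cache limit is min(2% of 4 GiB, 25% of 128 MiB)
 * = min(~82 MiB, 32 MiB) = 32 MiB; with 512-byte blocks that is 65536
 * buffers, well above the DM_BUFIO_MIN_BUFFERS floor of 8.
 */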
/*
 * Check buffer ages in this interval (seconds).
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds).
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60
/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
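/*
 * Illustrative example (assuming 4 KiB pages and MAX_ORDER == 11):
 * blocks up to 2 KiB come from a slab cache, blocks up to
 * 4 KiB << 10 = 4 MiB may use __get_free_pages(), and anything larger
 * (or any allocation that must not fail) falls back to __vmalloc().
 */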
/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;

	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;

	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	struct hlist_head *cache_hash;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};
/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct hlist_node hash_list;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};
/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
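/*
 * Illustrative example (assuming 4 KiB pages): there are
 * PAGE_SHIFT - SECTOR_SHIFT = 3 per-block-size slab caches; a client
 * with 2048-byte blocks has blocks_per_page_bits = 1 and uses index 0,
 * 1024 bytes uses index 1, and 512 bytes uses index 2. Clients with
 * block_size >= PAGE_SIZE have blocks_per_page_bits = 0 and never use
 * a slab cache.
 */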
#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()		do { } while (0)
#endif
/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout.
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;
/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count.
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/
static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}
/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}
/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;
	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}
/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}
/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}
/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
	b->last_accessed = jiffies;
}
/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	hlist_del(&b->hash_list);
	list_del(&b->lru_list);
}
/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
}
/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}
static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * the bit.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_WRITING);
}
/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
	io_schedule();

	return 0;
}
/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock(&b->state, B_WRITING,
			 do_io_schedule, TASK_UNINTERRUPTIBLE);

	submit_io(b, WRITE, b->block, write_endio);
}
/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b);
	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}
/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}
/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}
static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}
/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b);
		dm_bufio_cond_resched();
	}
}
/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < DM_BUFIO_MIN_BUFFERS)
		buffers = DM_BUFIO_MIN_BUFFERS;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1);
}
/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
			     hash_list) {
		dm_bufio_cond_resched();
		if (b->block == block)
			return b;
	}

	return NULL;
}
/*----------------------------------------------------------------
 * Getting buffers and releasing them
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}
/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_READING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_READING);
}
/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit);
	dm_bufio_unlock(c);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
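/*
 * Illustrative usage sketch (not part of this file; "my_client" and "blk"
 * are hypothetical names): a typical caller reads a metadata block and
 * releases it when done:
 *
 *	struct dm_buffer *bp;
 *	void *data;
 *
 *	data = dm_bufio_read(my_client, blk, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... examine the block_size bytes at "data" ...
 *	dm_bufio_release(bp);
 */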
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
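/*
 * Illustrative usage sketch (hypothetical caller): warm the cache for a
 * run of blocks before reading them one by one:
 *
 *	dm_bufio_prefetch(my_client, first_block, 8);
 *	for (i = 0; i < 8; i++) {
 *		data = dm_bufio_read(my_client, first_block + i, &bp);
 *		...
 *		dm_bufio_release(bp);
 *	}
 */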
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * bad buffers.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
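/*
 * Illustrative usage sketch (hypothetical caller): allocate a block that
 * will be completely overwritten, dirty it, and later commit all dirty
 * buffers (dm_bufio_write_dirty_buffers also issues the flush):
 *
 *	data = dm_bufio_new(my_client, blk, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(my_client));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	...
 *	r = dm_bufio_write_dirty_buffers(my_client);
 */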
/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
/*
 * Use dm-io to send an empty barrier and flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b);
	if (b->hold_count == 1) {
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock(&b->state, B_WRITING,
				 do_io_schedule, TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);

	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
	       (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);
static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}
/*
 * Test if the buffer is unused and too old, and if so, clean it up and
 * free it.
 * If __GFP_IO is not set in gfp, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted
 * to a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 1;

	if (!(gfp & __GFP_IO)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 1;
	}

	if (b->hold_count)
		return 1;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 0;
}
static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   struct shrink_control *sc)
{
	int l;
	struct dm_buffer *b, *tmp;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
			    !--nr_to_scan)
				return;
		dm_bufio_cond_resched();
	}
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct dm_bufio_client *c =
	    container_of(shrinker, struct dm_bufio_client, shrinker);
	unsigned long r;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (sc->gfp_mask & __GFP_IO)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return !nr_to_scan ? 0 : -1;

	if (nr_to_scan)
		__scan(c, nr_to_scan, sc);

	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];

	dm_bufio_unlock(c);

	return r;
}
/*
 * Create the buffering interface.
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
	if (!c->cache_hash) {
		r = -ENOMEM;
		goto bad_hash;
	}

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		INIT_HLIST_HEAD(&c->cache_hash[i]);

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.shrink = shrink;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	vfree(c->cache_hash);
bad_hash:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
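/*
 * Illustrative usage sketch (hypothetical target): create a bufio client
 * for 4 KiB blocks with one reserved buffer and no per-buffer aux data or
 * callbacks, and tear it down again:
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(dev->bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */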
/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		BUG_ON(!hlist_empty(&c->cache_hash[i]));

	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	vfree(c->cache_hash);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
static void cleanup_old_buffers(void)
{
	unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
	struct dm_bufio_client *c;

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}
/*----------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}
/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)
module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");