/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
8 #include "dm-bio-prison.h"
10 #include <linux/spinlock.h>
11 #include <linux/mempool.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
15 /*----------------------------------------------------------------*/
19 struct hlist_head cells
;
22 struct dm_bio_prison
{
27 struct bucket
*buckets
;
30 /*----------------------------------------------------------------*/
32 static uint32_t calc_nr_buckets(unsigned nr_cells
)
37 nr_cells
= min(nr_cells
, 8192u);
/* Slab cache for struct dm_bio_prison_cell; created in dm_bio_prison_init(). */
static struct kmem_cache *_cell_cache;
47 static void init_bucket(struct bucket
*b
)
49 spin_lock_init(&b
->lock
);
50 INIT_HLIST_HEAD(&b
->cells
);
54 * @nr_cells should be the number of cells you want in use _concurrently_.
55 * Don't confuse it with the number of distinct keys.
57 struct dm_bio_prison
*dm_bio_prison_create(unsigned nr_cells
)
60 uint32_t nr_buckets
= calc_nr_buckets(nr_cells
);
61 size_t len
= sizeof(struct dm_bio_prison
) +
62 (sizeof(struct bucket
) * nr_buckets
);
63 struct dm_bio_prison
*prison
= kmalloc(len
, GFP_KERNEL
);
68 prison
->cell_pool
= mempool_create_slab_pool(nr_cells
, _cell_cache
);
69 if (!prison
->cell_pool
) {
74 prison
->nr_buckets
= nr_buckets
;
75 prison
->hash_mask
= nr_buckets
- 1;
76 prison
->buckets
= (struct bucket
*) (prison
+ 1);
77 for (i
= 0; i
< nr_buckets
; i
++)
78 init_bucket(prison
->buckets
+ i
);
82 EXPORT_SYMBOL_GPL(dm_bio_prison_create
);
84 void dm_bio_prison_destroy(struct dm_bio_prison
*prison
)
86 mempool_destroy(prison
->cell_pool
);
89 EXPORT_SYMBOL_GPL(dm_bio_prison_destroy
);
91 struct dm_bio_prison_cell
*dm_bio_prison_alloc_cell(struct dm_bio_prison
*prison
, gfp_t gfp
)
93 return mempool_alloc(prison
->cell_pool
, gfp
);
95 EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell
);
97 void dm_bio_prison_free_cell(struct dm_bio_prison
*prison
,
98 struct dm_bio_prison_cell
*cell
)
100 mempool_free(cell
, prison
->cell_pool
);
102 EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell
);
104 static uint32_t hash_key(struct dm_bio_prison
*prison
, struct dm_cell_key
*key
)
106 const unsigned long BIG_PRIME
= 4294967291UL;
107 uint64_t hash
= key
->block
* BIG_PRIME
;
109 return (uint32_t) (hash
& prison
->hash_mask
);
112 static int keys_equal(struct dm_cell_key
*lhs
, struct dm_cell_key
*rhs
)
114 return (lhs
->virtual == rhs
->virtual) &&
115 (lhs
->dev
== rhs
->dev
) &&
116 (lhs
->block
== rhs
->block
);
119 static struct bucket
*get_bucket(struct dm_bio_prison
*prison
,
120 struct dm_cell_key
*key
)
122 return prison
->buckets
+ hash_key(prison
, key
);
125 static struct dm_bio_prison_cell
*__search_bucket(struct bucket
*b
,
126 struct dm_cell_key
*key
)
128 struct dm_bio_prison_cell
*cell
;
130 hlist_for_each_entry(cell
, &b
->cells
, list
)
131 if (keys_equal(&cell
->key
, key
))
137 static void __setup_new_cell(struct bucket
*b
,
138 struct dm_cell_key
*key
,
140 struct dm_bio_prison_cell
*cell
)
142 memcpy(&cell
->key
, key
, sizeof(cell
->key
));
143 cell
->holder
= holder
;
144 bio_list_init(&cell
->bios
);
145 hlist_add_head(&cell
->list
, &b
->cells
);
148 static int __bio_detain(struct bucket
*b
,
149 struct dm_cell_key
*key
,
151 struct dm_bio_prison_cell
*cell_prealloc
,
152 struct dm_bio_prison_cell
**cell_result
)
154 struct dm_bio_prison_cell
*cell
;
156 cell
= __search_bucket(b
, key
);
159 bio_list_add(&cell
->bios
, inmate
);
164 __setup_new_cell(b
, key
, inmate
, cell_prealloc
);
165 *cell_result
= cell_prealloc
;
169 static int bio_detain(struct dm_bio_prison
*prison
,
170 struct dm_cell_key
*key
,
172 struct dm_bio_prison_cell
*cell_prealloc
,
173 struct dm_bio_prison_cell
**cell_result
)
177 struct bucket
*b
= get_bucket(prison
, key
);
179 spin_lock_irqsave(&b
->lock
, flags
);
180 r
= __bio_detain(b
, key
, inmate
, cell_prealloc
, cell_result
);
181 spin_unlock_irqrestore(&b
->lock
, flags
);
/*
 * Public detain: queue @inmate against @key, creating a cell from
 * @cell_prealloc if needed.  Returns 1 if the key was already held.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
196 int dm_get_cell(struct dm_bio_prison
*prison
,
197 struct dm_cell_key
*key
,
198 struct dm_bio_prison_cell
*cell_prealloc
,
199 struct dm_bio_prison_cell
**cell_result
)
201 return bio_detain(prison
, key
, NULL
, cell_prealloc
, cell_result
);
203 EXPORT_SYMBOL_GPL(dm_get_cell
);
206 * @inmates must have been initialised prior to this call
208 static void __cell_release(struct dm_bio_prison_cell
*cell
,
209 struct bio_list
*inmates
)
211 hlist_del(&cell
->list
);
215 bio_list_add(inmates
, cell
->holder
);
216 bio_list_merge(inmates
, &cell
->bios
);
220 void dm_cell_release(struct dm_bio_prison
*prison
,
221 struct dm_bio_prison_cell
*cell
,
222 struct bio_list
*bios
)
225 struct bucket
*b
= get_bucket(prison
, &cell
->key
);
227 spin_lock_irqsave(&b
->lock
, flags
);
228 __cell_release(cell
, bios
);
229 spin_unlock_irqrestore(&b
->lock
, flags
);
231 EXPORT_SYMBOL_GPL(dm_cell_release
);
234 * Sometimes we don't want the holder, just the additional bios.
236 static void __cell_release_no_holder(struct dm_bio_prison_cell
*cell
,
237 struct bio_list
*inmates
)
239 hlist_del(&cell
->list
);
240 bio_list_merge(inmates
, &cell
->bios
);
243 void dm_cell_release_no_holder(struct dm_bio_prison
*prison
,
244 struct dm_bio_prison_cell
*cell
,
245 struct bio_list
*inmates
)
248 struct bucket
*b
= get_bucket(prison
, &cell
->key
);
250 spin_lock_irqsave(&b
->lock
, flags
);
251 __cell_release_no_holder(cell
, inmates
);
252 spin_unlock_irqrestore(&b
->lock
, flags
);
254 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder
);
256 void dm_cell_error(struct dm_bio_prison
*prison
,
257 struct dm_bio_prison_cell
*cell
, int error
)
259 struct bio_list bios
;
262 bio_list_init(&bios
);
263 dm_cell_release(prison
, cell
, &bios
);
265 while ((bio
= bio_list_pop(&bios
)))
266 bio_endio(bio
, error
);
268 EXPORT_SYMBOL_GPL(dm_cell_error
);
270 /*----------------------------------------------------------------*/
272 #define DEFERRED_SET_SIZE 64
274 struct dm_deferred_entry
{
275 struct dm_deferred_set
*ds
;
277 struct list_head work_items
;
280 struct dm_deferred_set
{
282 unsigned current_entry
;
284 struct dm_deferred_entry entries
[DEFERRED_SET_SIZE
];
287 struct dm_deferred_set
*dm_deferred_set_create(void)
290 struct dm_deferred_set
*ds
;
292 ds
= kmalloc(sizeof(*ds
), GFP_KERNEL
);
296 spin_lock_init(&ds
->lock
);
297 ds
->current_entry
= 0;
299 for (i
= 0; i
< DEFERRED_SET_SIZE
; i
++) {
300 ds
->entries
[i
].ds
= ds
;
301 ds
->entries
[i
].count
= 0;
302 INIT_LIST_HEAD(&ds
->entries
[i
].work_items
);
307 EXPORT_SYMBOL_GPL(dm_deferred_set_create
);
/* Free a set created by dm_deferred_set_create(); must be fully drained. */
void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);
315 struct dm_deferred_entry
*dm_deferred_entry_inc(struct dm_deferred_set
*ds
)
318 struct dm_deferred_entry
*entry
;
320 spin_lock_irqsave(&ds
->lock
, flags
);
321 entry
= ds
->entries
+ ds
->current_entry
;
323 spin_unlock_irqrestore(&ds
->lock
, flags
);
327 EXPORT_SYMBOL_GPL(dm_deferred_entry_inc
);
329 static unsigned ds_next(unsigned index
)
331 return (index
+ 1) % DEFERRED_SET_SIZE
;
334 static void __sweep(struct dm_deferred_set
*ds
, struct list_head
*head
)
336 while ((ds
->sweeper
!= ds
->current_entry
) &&
337 !ds
->entries
[ds
->sweeper
].count
) {
338 list_splice_init(&ds
->entries
[ds
->sweeper
].work_items
, head
);
339 ds
->sweeper
= ds_next(ds
->sweeper
);
342 if ((ds
->sweeper
== ds
->current_entry
) && !ds
->entries
[ds
->sweeper
].count
)
343 list_splice_init(&ds
->entries
[ds
->sweeper
].work_items
, head
);
346 void dm_deferred_entry_dec(struct dm_deferred_entry
*entry
, struct list_head
*head
)
350 spin_lock_irqsave(&entry
->ds
->lock
, flags
);
351 BUG_ON(!entry
->count
);
353 __sweep(entry
->ds
, head
);
354 spin_unlock_irqrestore(&entry
->ds
->lock
, flags
);
356 EXPORT_SYMBOL_GPL(dm_deferred_entry_dec
);
359 * Returns 1 if deferred or 0 if no pending items to delay job.
361 int dm_deferred_set_add_work(struct dm_deferred_set
*ds
, struct list_head
*work
)
367 spin_lock_irqsave(&ds
->lock
, flags
);
368 if ((ds
->sweeper
== ds
->current_entry
) &&
369 !ds
->entries
[ds
->current_entry
].count
)
372 list_add(work
, &ds
->entries
[ds
->current_entry
].work_items
);
373 next_entry
= ds_next(ds
->current_entry
);
374 if (!ds
->entries
[next_entry
].count
)
375 ds
->current_entry
= next_entry
;
377 spin_unlock_irqrestore(&ds
->lock
, flags
);
381 EXPORT_SYMBOL_GPL(dm_deferred_set_add_work
);
383 /*----------------------------------------------------------------*/
385 static int __init
dm_bio_prison_init(void)
387 _cell_cache
= KMEM_CACHE(dm_bio_prison_cell
, 0);
394 static void __exit
dm_bio_prison_exit(void)
396 kmem_cache_destroy(_cell_cache
);
/* Module registration and metadata. */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");