#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
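/*
 * Block until a tag becomes available: grab one with __GFP_WAIT (sleeping
 * if the map is exhausted) and immediately release it again.
 */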
void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
{
        int tag, zero = 0;

        tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
        blk_mq_put_tag(hctx, tag, &zero);
}
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];
                int ret;

                ret = find_first_zero_bit(&bm->word, bm->depth);
                if (ret < bm->depth)
                        return true;
        }

        return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        if (!tags)
                return true;

        return bt_has_free_tags(&tags->bitmap_tags);
}
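/*
 * Advance a wait queue index with wrap-around. The AND below is a cheap
 * modulo that relies on BT_WAIT_QUEUES being a power of two.
 */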
static inline void bt_index_inc(unsigned int *index)
{
        *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
            !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                atomic_inc(&hctx->tags->active_queues);

        return true;
}
/*
 * Wake up all tasks potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;

        bt = &tags->bitmap_tags;
        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);

                bt_index_inc(&wake_index);
        }
}
/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return;

        atomic_dec(&tags->active_queues);

        blk_mq_tag_wakeup_all(tags);
}
/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_bitmap_tags *bt)
{
        unsigned int depth, users;

        if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
                return true;
        if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                return true;

        /*
         * Don't try dividing an ant
         */
        if (bt->depth == 1)
                return true;

        users = atomic_read(&hctx->tags->active_queues);
        if (!users)
                return true;

        /*
         * Allow at least some tags
         */
        depth = max((bt->depth + users - 1) / users, 4U);
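        /*
         * The divide above rounds up: with bt->depth == 128 and three
         * active queues, each hctx may use up to 43 tags. The max() with
         * 4U guarantees a minimum share so heavily shared maps still
         * make progress.
         */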
        return atomic_read(&hctx->nr_active) < depth;
}
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
        int tag, org_last_tag, end;

        org_last_tag = last_tag;
        end = bm->depth;
        do {
restart:
                tag = find_next_zero_bit(&bm->word, end, last_tag);
                if (unlikely(tag >= end)) {
                        /*
                         * We started with an offset, start from 0 to
                         * exhaust the map.
                         */
                        if (org_last_tag && last_tag) {
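                                /*
                                 * Second pass: search only the bits below
                                 * the original offset, so the two passes
                                 * together cover the word exactly once.
                                 */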
                                end = last_tag;
                                last_tag = 0;
                                goto restart;
                        }
                        return -1;
                }
                last_tag = tag + 1;
        } while (test_and_set_bit_lock(tag, &bm->word));

        return tag;
}
/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
                    unsigned int *tag_cache)
{
        unsigned int last_tag, org_last_tag;
        int index, i, tag;

        if (!hctx_may_queue(hctx, bt))
                return -1;

        last_tag = org_last_tag = *tag_cache;
        index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
                tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
                if (tag != -1) {
                        tag += (index << bt->bits_per_word);
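                        /*
                         * The word-local bit is converted to a map-wide
                         * tag number here: each word covers
                         * 2^bits_per_word tags, so the word index simply
                         * becomes the high bits of the tag.
                         */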
                        goto done;
                }

                last_tag = 0;
                if (++index >= bt->map_nr)
                        index = 0;
        }

        *tag_cache = 0;
        return -1;

        /*
         * Only update the cache from the allocation path, if we ended
         * up using the specific cached tag.
         */
done:
        if (tag == org_last_tag) {
                last_tag = tag + 1;
                if (last_tag >= bt->depth - 1)
                        last_tag = 0;

                *tag_cache = last_tag;
        }

        return tag;
}
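/*
 * Hand out one of the BT_WAIT_QUEUES wait queues in round-robin fashion.
 * Spreading sleepers over several wait queues keeps a single wait queue
 * head from becoming a contention point when many submitters block.
 */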
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
{
        struct bt_wait_state *bs;

        if (!hctx)
                return &bt->bs[0];

        bs = &bt->bs[hctx->wait_index];
        bt_index_inc(&hctx->wait_index);
        return bs;
}
static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
                  unsigned int *last_tag, gfp_t gfp)
{
        struct bt_wait_state *bs;
        DEFINE_WAIT(wait);
        int tag;

        tag = __bt_get(hctx, bt, last_tag);
        if (tag != -1)
                return tag;

        if (!(gfp & __GFP_WAIT))
                return -1;

        bs = bt_wait_ptr(bt, hctx);
        do {
                bool was_empty;

                was_empty = list_empty(&wait.task_list);
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
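                /*
                 * Retry the allocation after queueing on the wait queue:
                 * a tag may have been freed between the failed attempt
                 * above and prepare_to_wait(), and this re-check closes
                 * that race.
                 */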
                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;

                if (was_empty)
                        atomic_set(&bs->wait_cnt, bt->wake_cnt);

                io_schedule();
        } while (1);

        finish_wait(&bs->wait, &wait);
        return tag;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
                                     struct blk_mq_hw_ctx *hctx,
                                     unsigned int *last_tag, gfp_t gfp)
{
        int tag;

        tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
        if (tag >= 0)
                return tag + tags->nr_reserved_tags;

        return BLK_MQ_TAG_FAIL;
}
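/*
 * In the combined tag space, reserved tags occupy [0, nr_reserved_tags)
 * and normal tags sit above them, which is why the allocation above adds
 * nr_reserved_tags and blk_mq_put_tag() subtracts it again.
 */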
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
                                              gfp_t gfp)
{
        int tag, zero = 0;

        if (unlikely(!tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;

        return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
                            gfp_t gfp, bool reserved)
{
        if (!reserved)
                return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);

        return __blk_mq_get_reserved_tag(hctx->tags, gfp);
}
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
        int i, wake_index;

        wake_index = bt->wake_index;
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];

                if (waitqueue_active(&bs->wait)) {
                        if (wake_index != bt->wake_index)
                                bt->wake_index = wake_index;

                        return bs;
                }

                bt_index_inc(&wake_index);
        }

        return NULL;
}
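/*
 * Tag frees batch their wakeups: every release decrements wait_cnt, and
 * only when it hits zero is one wait queue woken and the counter reset
 * to bt->wake_cnt. This avoids a thundering herd on every freed tag.
 */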
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;

        /*
         * The unlock memory barrier needs to order access to the request
         * in the free path against clearing the tag bit.
         */
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        bs = bt_wake_ptr(bt);
        if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
                atomic_set(&bs->wait_cnt, bt->wake_cnt);
                bt_index_inc(&bt->wake_index);
                wake_up(&bs->wait);
        }
}
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        BUG_ON(tag >= tags->nr_tags);

        bt_clear_tag(&tags->bitmap_tags, tag);
}
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
                                      unsigned int tag)
{
        BUG_ON(tag >= tags->nr_reserved_tags);

        bt_clear_tag(&tags->breserved_tags, tag);
}
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
                    unsigned int *last_tag)
{
        struct blk_mq_tags *tags = hctx->tags;

        if (tag >= tags->nr_reserved_tags) {
                const int real_tag = tag - tags->nr_reserved_tags;

                __blk_mq_put_tag(tags, real_tag);
                *last_tag = real_tag;
        } else
                __blk_mq_put_reserved_tag(tags, tag);
}
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
                             unsigned long *free_map, unsigned int off)
{
        int i;

        for (i = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];
                int bit = 0;

                do {
                        bit = find_next_zero_bit(&bm->word, bm->depth, bit);
                        if (bit >= bm->depth)
                                break;

                        __set_bit(bit + off, free_map);
                        bit++;
                } while (1);

                off += (1 << bt->bits_per_word);
        }
}
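/*
 * Build a map of the free tags and hand it to the callback; bits left
 * clear in tag_map correspond to busy tags. Note the layout: reserved
 * tags occupy the low bits, normal tags are offset by nr_reserved_tags.
 */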
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
          void (*fn)(void *, unsigned long *), void *data)
{
        unsigned long *tag_map;
        size_t map_size;

        map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                return;

        bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
        if (tags->nr_reserved_tags)
                bt_for_each_free(&tags->breserved_tags, tag_map, 0);

        fn(data, tag_map);
        kfree(tag_map);
}
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
        unsigned int i, used;

        for (i = 0, used = 0; i < bt->map_nr; i++) {
                struct blk_align_bitmap *bm = &bt->map[i];

                used += bitmap_weight(&bm->word, bm->depth);
        }

        return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
                            unsigned int depth)
{
        unsigned int tags_per_word = 1U << bt->bits_per_word;
        unsigned int map_depth = depth;

        if (depth) {
                int i;

                for (i = 0; i < bt->map_nr; i++) {
                        bt->map[i].depth = min(map_depth, tags_per_word);
                        map_depth -= bt->map[i].depth;
                }
        }

        bt->wake_cnt = BT_WAIT_BATCH;
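        /*
         * For shallow maps the default batch would let too many frees go
         * by without a wakeup, so cap it at a quarter of the depth (but
         * never below 1).
         */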
        if (bt->wake_cnt > depth / 4)
                bt->wake_cnt = max(1U, depth / 4);

        bt->depth = depth;
}
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                        int node, bool reserved)
{
        int i;

        bt->bits_per_word = ilog2(BITS_PER_LONG);

        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
                unsigned int nr, tags_per_word;

                tags_per_word = (1 << bt->bits_per_word);

                /*
                 * If the tag space is small, shrink the number of tags
                 * per word so we spread over a few cachelines, at least.
                 * If less than 4 tags, just forget about it, it's not
                 * going to work optimally anyway.
                 */
                if (depth >= 4) {
                        while (tags_per_word * 4 > depth) {
                                bt->bits_per_word--;
                                tags_per_word = (1 << bt->bits_per_word);
                        }
                }
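                /*
                 * Example: depth == 32 on a 64-bit machine starts at
                 * bits_per_word == 6 (64 tags per word) and shrinks to
                 * 3 (8 tags per word), spreading the 32 tags over four
                 * words, i.e. four separate cachelines.
                 */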
                nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
                                                GFP_KERNEL, node);
                if (!bt->map)
                        return -ENOMEM;

                bt->map_nr = nr;
        }

        bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
        if (!bt->bs) {
                kfree(bt->map);
                return -ENOMEM;
        }

        for (i = 0; i < BT_WAIT_QUEUES; i++)
                init_waitqueue_head(&bt->bs[i].wait);

        bt_update_count(bt, depth);
        return 0;
}
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
        kfree(bt->map);
        kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
                                                   int node)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        if (bt_alloc(&tags->bitmap_tags, depth, node, false))
                goto enomem;
        if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
                goto enomem;

        return tags;
enomem:
        bt_free(&tags->bitmap_tags);
        kfree(tags);
        return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

        return blk_mq_init_bitmap_tags(tags, node);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        bt_free(&tags->bitmap_tags);
        bt_free(&tags->breserved_tags);
        kfree(tags);
}
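/*
 * Seed a software queue's last_tag cache at a random offset, so different
 * ctxs start their searches in different words of the map rather than all
 * piling onto word 0.
 */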
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

        *tag = prandom_u32() % depth;
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
        tdepth -= tags->nr_reserved_tags;
        if (tdepth > tags->nr_tags)
                return -EINVAL;

        /*
         * We don't need to (and can't) update reserved tags here; they
         * remain static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
        blk_mq_tag_wakeup_all(tags);
        return 0;
}
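/*
 * Example sysfs output (values illustrative only):
 *
 *   nr_tags=128, reserved_tags=1, bits_per_word=5
 *   nr_free=96, nr_reserved=1
 *   active_queues=2
 */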
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        unsigned int free, res;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
                        "bits_per_word=%u\n",
                        tags->nr_tags, tags->nr_reserved_tags,
                        tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
        page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

        return page - orig_page;
}