/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over a 100ms slice and after that the slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
static struct blkio_policy_type blkio_policy_throtl;

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}
#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
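/* Per-cgroup, per-queue throttling state; lives in a blkio_group's policy data. */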
struct throtl_grp {
	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when the group
	 * will unthrottle and is ready to dispatch more bios. It is used as a
	 * key to sort active groups in the service tree.
	 */
	unsigned long disptime;

	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IO per second rate limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in the current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in the current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
};
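/* Per-request_queue throttling data, reachable as q->td. */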
struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* number of total undestroyed groups */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	bool limits_changed;
};
static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
{
	return blkg_to_pdata(blkg, &blkio_policy_throtl);
}
static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
{
	return pdata_to_blkg(tg, &blkio_policy_throtl);
}
enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};
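/*
 * Generate throtl_mark_tg_<name>(), throtl_clear_tg_<name>() and
 * throtl_tg_<name>() helpers for each tg_state_flags bit.
 */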
#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
			  blkg_path(tg_to_blkg(tg)), ##args)

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}
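/* Called when a new blkio_group is set up; every limit starts at -1 (unlimited). */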
static void throtl_init_blkio_group(struct blkio_group *blkg)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;
}
static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
					   struct blkio_cgroup *blkcg)
{
	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup)
		return td->root_tg;

	return blkg_to_tg(blkg_lookup(blkcg, td->queue, BLKIO_POLICY_THROTL));
}
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkio_cgroup *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup) {
		tg = td->root_tg;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_THROTL, false);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (!IS_ERR(blkg))
			tg = blkg_to_tg(blkg);
		else if (!blk_queue_dead(q))
			tg = td->root_tg;
	}

	return tg;
}
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}
static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}
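/*
 * Insert @tg into the service tree, an rbtree keyed by disptime. The cached
 * ->left pointer always tracks the group with the earliest dispatch time so
 * throtl_rb_first() can find the next group to dispatch cheaply.
 */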
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}
static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}
static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}
static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}
static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
/* Determine if the previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}
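/*
 * Worked example for throtl_trim_slice() below: with bps[rw] = 1048576
 * (1 MiB/s) and two full 100ms slices elapsed since slice_start,
 * bytes_trim = 1048576 * (HZ/10) * 2 / HZ = ~209715 bytes is forgiven from
 * bytes_disp and slice_start moves forward by two slices, so old
 * consumption no longer counts against the current budget.
 */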
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used up. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially the cgroup limit was very low resulting in a high
	 * slice_end, but later the limit was bumped up and the bio was
	 * dispatched sooner, then we need to reduce slice_end. A high bogus
	 * slice_end is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
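/*
 * Example for tg_with_in_iops_limit(): with iops[rw] = 100 and one 100ms
 * slice elapsed, io_allowed = 100 * (HZ/10) / HZ = 10, so the 11th bio in
 * the slice must wait roughly (11 * HZ / 100) jiffies from slice_start.
 */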
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1, then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that the slice should
	 * have been trimmed.
	 */
	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
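/*
 * Example for tg_with_in_bps_limit(): with bps[rw] = 1048576 (1 MiB/s) and
 * one 100ms slice elapsed, bytes_allowed = 1048576 * (HZ/10) / HZ = ~104857
 * bytes; a bio that would push bytes_disp past that waits roughly
 * extra_bytes * HZ / bps[rw] jiffies.
 */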
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw)
{
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;
	return 0;
}
/*
 * Returns whether one can dispatch a bio or not. Also returns the approx number
 * of jiffies to wait before this bio is within the IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently the whole state machine of the group depends on the first
	 * bio queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise renew/extend
	 * the existing slice to make sure it is at least throtl_slice long
	 * from now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}
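/* Account @bio against the group's byte and io budgets and update cgroup dispatch stats. */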
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(tg_to_blkg(tg), &blkio_policy_throtl,
				      bio->bi_size, rw, sync);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	blkg_get(tg_to_blkg(tg));
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on blkg */
	blkg_put(tg_to_blkg(tg));

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}
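/*
 * With throtl_grp_quantum = 8, at most 6 reads (8 * 3 / 4) and 2 writes are
 * dispatched per group per round, giving reads roughly 75% of the budget.
 */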
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */
	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
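/*
 * Walk the service tree and dispatch from every group whose disptime has
 * passed, stopping once throtl_quantum bios have been moved to @bl.
 */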
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct request_queue *q = td->queue;
	struct blkio_group *blkg, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_THROTL],
				 q_node[BLKIO_POLICY_THROTL]) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}
void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					      throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}
/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
static void throtl_update_blkio_group_common(struct throtl_data *td,
					     struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
/*
 * For all update functions, @q should be a valid pointer because these
 * update functions are called under blkcg_lock, which means blkg is
 * valid and in turn @q is valid. The queue exit path cannot race because
 * of blkcg_lock.
 *
 * We cannot take the queue lock in update functions as taking queue lock
 * under blkcg_lock is not allowed. On other paths we take blkcg_lock under
 * queue_lock.
 */
static void throtl_update_blkio_group_read_bps(struct request_queue *q,
		struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(q->td, tg);
}
static void throtl_update_blkio_group_write_bps(struct request_queue *q,
		struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(q->td, tg);
}
static void throtl_update_blkio_group_read_iops(struct request_queue *q,
		struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(q->td, tg);
}
static void throtl_update_blkio_group_write_iops(struct request_queue *q,
		struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_grp *tg = blkg_to_tg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(q->td, tg);
}
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}
static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_init_group_fn = throtl_init_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
	.pdata_size = sizeof(struct throtl_grp),
};
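/*
 * blk_throtl_bio - check whether @bio must be throttled on @q.
 * Returns true if the bio was queued for later dispatch by the throttle
 * worker, false if the caller may dispatch it right away.
 */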
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg && tg_no_rule_group(tg, rw)) {
		blkiocg_update_dispatch_stats(tg_to_blkg(tg),
					      &blkio_policy_throtl,
					      bio->bi_size, rw,
					      rw_is_sync(bio->bi_rw));
		goto out_unlock_rcu;
	}

	/*
	 * Either the group has not been allocated yet or it is not an
	 * unlimited IO group.
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is with-in rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim the slice even when bios are not being
		 * queued, otherwise it might happen that a bio is not queued
		 * for a long time and the slice keeps on extending and trim
		 * is not called for a long time. Now if limits are reduced
		 * suddenly we take into account all the IO dispatched so far
		 * at the new low rate and newly queued IO gets a really long
		 * dispatch time.
		 *
		 * So keep on trimming the slice even if the bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct blkio_group *blkg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	q->td = td;
	td->queue = q;

	/* alloc and init root group. */
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_THROTL,
				  true);
	if (!IS_ERR(blkg))
		td->root_tg = blkg_to_tg(blkg);

	spin_unlock_irq(q->queue_lock);

	if (!td->root_tg) {
		kfree(td);
		return -ENOMEM;
	}
	return 0;
}
void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	blkg_destroy_all(q, BLKIO_POLICY_THROTL, true);

	/* If there are other groups */
	spin_lock_irq(q->queue_lock);
	wait = q->nr_blkgs[BLKIO_POLICY_THROTL];
	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg_to_blkg(tg)->q accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than the root group). This can happen if the cgroup
	 * deletion path claimed the responsibility of cleaning up a group
	 * before the queue cleanup code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe to make sure that after the previous flush, if
	 * somebody updated limits through cgroup and another work got queued,
	 * cancel it.
	 */
	throtl_shutdown_wq(q);
}
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);