/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */
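
/*
 * Illustration (not from the original source): with CONFIG_HZ=1000 the slice
 * above is 1000/10 = 100 jiffies, with CONFIG_HZ=100 it is 10 jiffies; either
 * way it corresponds to the 100 ms window mentioned in the comment.
 */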

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

struct throtl_grp {
	/* List of throtl groups on the request queue */
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bios dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];
};

struct throtl_data {
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;
};

enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
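
/*
 * For reference, THROTL_TG_FNS(on_rr) above expands (written out by hand
 * from the macro, for illustration only) to:
 *
 *	static inline void throtl_mark_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags |= (1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline void throtl_clear_tg_on_rr(struct throtl_grp *tg)
 *	{
 *		(tg)->flags &= ~(1 << THROTL_TG_FLAG_on_rr);
 *	}
 *	static inline int throtl_tg_on_rr(const struct throtl_grp *tg)
 *	{
 *		return ((tg)->flags & (1 << THROTL_TG_FLAG_on_rr)) != 0;
 *	}
 */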

#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);	\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
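
/*
 * Example (illustrative only, "/test" is a made-up cgroup path): a call such
 * as throtl_log_tg(td, tg, "[%c] bio. bdisp=%u ...", ...) shows up in
 * blktrace as a message along the lines of
 *
 *	throtl /test [R] bio. bdisp=4096 ...
 *
 * while plain throtl_log() emits the same message without the group path.
 */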

static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}

static inline int total_nr_queued(struct throtl_data *td)
{
	return (td->nr_queued[0] + td->nr_queued[1]);
}

static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}

static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;
	kfree(tg);
}

static struct throtl_grp *throtl_find_alloc_tg(struct throtl_data *td,
			struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct throtl_grp *tg = NULL;
	void *key = td;
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	/*
	 * TODO: Speed up blkiocg_lookup_group() by maintaining a radix
	 * tree of blkg (instead of traversing through hash list all
	 * the time).
	 */
	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));

	/* Fill in device details for root group */
	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
		goto done;
	}

	if (tg)
		goto done;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, td->queue->node);
	if (!tg)
		goto done;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	/* Add group onto cgroup list */
	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
	blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td,
				MKDEV(major, minor), BLKIO_POLICY_THROTL);

	tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev);
	tg->bps[WRITE] = blkcg_get_write_bps(blkcg, tg->blkg.dev);
	tg->iops[READ] = blkcg_get_read_iops(blkcg, tg->blkg.dev);
	tg->iops[WRITE] = blkcg_get_write_iops(blkcg, tg->blkg.dev);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
done:
	return tg;
}

static struct throtl_grp *throtl_get_tg(struct throtl_data *td)
{
	struct cgroup *cgroup;
	struct throtl_grp *tg = NULL;

	rcu_read_lock();
	cgroup = task_cgroup(current, blkio_subsys_id);
	tg = throtl_find_alloc_tg(td, cgroup);
	if (!tg)
		tg = &td->root_tg;
	rcu_read_unlock();
	return tg;
}

static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}

static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}

static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td->queue, 0);
	else
		throtl_schedule_delayed_work(td->queue,
				(st->min_disptime - jiffies));
}

static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
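
/*
 * Worked example (illustrative numbers): roundup(jiffy_end, throtl_slice)
 * rounds jiffy_end up to the next multiple of throtl_slice, so with
 * throtl_slice = 100 jiffies a jiffy_end of 4301 becomes 4400. slice_end
 * therefore always lands on a throtl_slice boundary.
 */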

/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, bytes_trim, time_elapsed, io_trim;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then time slices don't get
	 * renewed. Don't try to trim the slice if slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;

	bytes_trim = (tg->bps[rw] * throtl_slice * nr_slices)/HZ;
	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%lu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
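
/*
 * Worked example (illustrative numbers): with HZ = 100, throtl_slice = 10
 * jiffies, bps = 1048576 (1 MB/s) and 35 jiffies elapsed since slice_start,
 * nr_slices = 35/10 = 3 and bytes_trim = (1048576 * 10 * 3)/100 = 314572,
 * i.e. the budget of three full slices is forgiven and slice_start moves
 * forward by 30 jiffies.
 */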

static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	io_allowed = (tg->iops[rw] * jiffies_to_msecs(jiffy_elapsed_rnd))
				/ MSEC_PER_SEC;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 1;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}

static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	bytes_allowed = (tg->bps[rw] * jiffies_to_msecs(jiffy_elapsed_rnd))
				/ MSEC_PER_SEC;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
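
/*
 * Worked example (illustrative numbers): with bps = 1048576 (1 MB/s),
 * HZ = 100 and a 262144-byte bio arriving right at slice start
 * (bytes_disp = 0), bytes_allowed for one rounded slice (100 ms) is
 * 104857 bytes, extra_bytes = 262144 - 104857 = 157287 and
 * div64_u64(157287 * HZ, 1048576) = 14 jiffies, to which the rounding
 * compensation (jiffy_elapsed_rnd - jiffy_elapsed) is then added.
 */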

/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is within IO rate and can be dispatched.
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one, otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = bio->bi_rw & REQ_SYNC;

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	/*
	 * TODO: This will take blkg->stats_lock. Figure out a way
	 * to avoid this cost.
	 */
	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}

static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}

static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/*
	 * If group is already on active tree, then update dispatch time
	 * only if it is less than the existing dispatch time. Otherwise
	 * always update the dispatch time.
	 */
	if (throtl_tg_on_rr(tg) && time_before(disptime, tg->disptime))
		return;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}

static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}

static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
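
/*
 * Illustration: with throtl_grp_quantum = 8, max_nr_reads = 8*3/4 = 6, so at
 * most 6 READs are pulled off a group before WRITEs get a turn, giving the
 * roughly 75%/25% split mentioned in the comment above.
 */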

static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}

/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;

	spin_lock_irq(q->queue_lock);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch.
	 */
	if (nr_disp) {
		while ((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_unplug(q);
	}
	return nr_disp;
}

void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}

/* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
{
	struct throtl_data *td = q->td;
	struct delayed_work *dwork = &td->throtl_work;

	if (total_nr_queued(td) > 0) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		kblockd_schedule_delayed_work(q, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
EXPORT_SYMBOL(throtl_schedule_delayed_work);

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}

static void throtl_release_tgs(struct throtl_data *td)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * the group as well.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
	}
}

static void throtl_td_free(struct throtl_data *td)
{
	kfree(td);
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid throtl_data pointer as long as we are
 * under rcu read lock.
 *
 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if queue was going away, cgroup deletion
 * path got to it first.
 */
void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg)
{
	unsigned long flags;
	struct throtl_data *td = key;

	spin_lock_irqsave(td->queue->queue_lock, flags);
	throtl_destroy_tg(td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(td->queue->queue_lock, flags);
}

static void throtl_update_blkio_group_read_bps(struct blkio_group *blkg,
			u64 read_bps)
{
	tg_of_blkg(blkg)->bps[READ] = read_bps;
}

static void throtl_update_blkio_group_write_bps(struct blkio_group *blkg,
			u64 write_bps)
{
	tg_of_blkg(blkg)->bps[WRITE] = write_bps;
}

static void throtl_update_blkio_group_read_iops(struct blkio_group *blkg,
			unsigned int read_iops)
{
	tg_of_blkg(blkg)->iops[READ] = read_iops;
}

static void throtl_update_blkio_group_write_iops(struct blkio_group *blkg,
			unsigned int write_iops)
{
	tg_of_blkg(blkg)->iops[WRITE] = write_iops;
}

void throtl_shutdown_timer_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}

static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};

int blk_throtl_bio(struct request_queue *q, struct bio **biop)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	struct bio *bio = *biop;
	bool rw = bio_data_dir(bio), update_disptime = true;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}

	spin_lock_irq(q->queue_lock);
	tg = throtl_get_tg(td);

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is within rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);
		goto out;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	*biop = NULL;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out:
	spin_unlock_irq(q->queue_lock);
	return 0;
}

int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct throtl_grp *tg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;

	/* Init root group */
	tg = &td->root_tg;
	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);

	/* Practically unlimited BW */
	tg->bps[0] = tg->bps[1] = -1;
	tg->iops[0] = tg->iops[1] = -1;

	/*
	 * Set root group reference to 2. One reference will be dropped when
	 * all groups on tg_list are being deleted during queue exit. Other
	 * reference will remain there as we don't want to delete this group
	 * as it is statically allocated and gets destroyed when throtl_data
	 * goes away.
	 */
	atomic_set(&tg->ref, 2);
	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;

	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	rcu_read_lock();
	blkiocg_add_blkio_group(&blkio_root_cgroup, &tg->blkg, (void *)td,
					0, BLKIO_POLICY_THROTL);
	rcu_read_unlock();

	/* Attach throtl data to request queue */
	td->queue = q;
	q->td = td;
	return 0;
}

void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_timer_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code gets to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	throtl_td_free(td);
}

static int __init throtl_init(void)
{
	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);