/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/* List of groups pending per cpu stats allocation */
static DEFINE_SPINLOCK(alloc_list_lock);
static LIST_HEAD(alloc_list);

static void blkio_stat_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)

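/*
 * Illustrative example (added note, not part of the original file):
 * BLKIOFILE_PRIVATE() packs the policy id into the high 16 bits and the
 * attribute into the low 16 bits.  Assuming BLKIO_POLICY_THROTL == 1
 * (per the enum in blk-cgroup.h) and an attribute value of 2:
 *
 *	BLKIOFILE_PRIVATE(1, 2)   == (1 << 16) | 2 == 0x10002
 *	BLKIOFILE_POLICY(0x10002) == 1
 *	BLKIOFILE_ATTR(0x10002)   == 2
 */
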
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

static struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}

struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkio_cgroup, css);
	return task_blkio_cgroup(current);
}
EXPORT_SYMBOL_GPL(bio_blkio_cgroup);

static inline void blkio_update_group_weight(struct blkio_group *blkg,
					     int plid, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->q,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
					  u64 bps, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   int plid, unsigned int iops,
					   int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
								blkg, iops);
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the queue_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					    struct blkio_policy_type *pol,
					    struct blkio_group *curr_blkg)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	if (blkio_blkg_waiting(&pd->stats))
		return;
	if (blkg == curr_blkg)
		return;
	pd->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&pd->stats);
}

/* This should be called with the queue_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);
	BUG_ON(blkio_blkg_idling(stats));

	stats->start_idle_time = sched_clock();
	blkio_mark_blkg_idling(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkio_blkg_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		blkio_clear_blkg_idling(stats);
	}
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	blkio_update_group_wait_time(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

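/*
 * Illustrative note (added, not part of the original file): only the
 * running sum and the sample count are stored above; the average itself
 * is computed lazily at read time in blkio_get_stat(), e.g. an
 * avg_queue_size_sum of 40 over 8 samples reports an avg_queue_size of 5.
 */
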
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if the cfqq
	 * got a new request in the parent group and moved to this group
	 * while being added to the service tree. Just ignore the event
	 * and move on.
	 */
	if (blkio_blkg_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue)
{
	struct blkg_policy_data *pd = blkg->pd[pol->plid];

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&pd->stats.dequeue, dequeue);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_policy_type *pol,
					struct blkio_group *curr_blkg) { }
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, 1);
	blkio_end_empty_time(stats);
	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->queued, rw, -1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_stat_add(&stats->time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
#endif
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * Should be called under the rcu read lock or the queue lock to make sure
 * the blkg pointer is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync)
{
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
	struct blkg_policy_data *pd = blkg->pd[pol->plid];
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/* If per cpu stats are not allocated yet, don't do any accounting. */
	if (pd->stats_cpu == NULL)
		return;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(pd->stats_cpu);

	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

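/*
 * Illustrative note (added, not part of the original file): the
 * "bytes >> 9" above converts the byte count to 512-byte sectors, so a
 * single 4096-byte dispatch adds 8 to the per-cpu sectors counter while
 * service_bytes still accumulates the full 4096.
 */
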
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	unsigned long long now = sched_clock();
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

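/*
 * Illustrative note (added, not part of the original file): for a request
 * queued at t=10, dispatched at t=15 and completed at t=18 (sched_clock()
 * values), the code above adds io_start_time - start_time = 5 to
 * wait_time and now - io_start_time = 3 to service_time.
 */
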
/* Merged stats are updated under the queue_lock, not per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync)
{
	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);

	lockdep_assert_held(blkg->q->queue_lock);

	blkg_rwstat_add(&stats->merged, rw, 1);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * Worker for allocating per cpu stat for blk groups. This is scheduled on
 * the system_nrt_wq once there are some groups on the alloc_list waiting
 * for allocation.
 */
static void blkio_stat_alloc_fn(struct work_struct *work)
{
	static void *pcpu_stats[BLKIO_NR_POLICIES];
	struct delayed_work *dwork = to_delayed_work(work);
	struct blkio_group *blkg;
	int i;
	bool empty = false;

alloc_stats:
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		if (pcpu_stats[i] != NULL)
			continue;

		pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);

		/* Allocation failed. Try again after some time. */
		if (pcpu_stats[i] == NULL) {
			queue_delayed_work(system_nrt_wq, dwork,
					   msecs_to_jiffies(10));
			return;
		}
	}

	spin_lock_irq(&blkio_list_lock);
	spin_lock(&alloc_list_lock);

	/* cgroup got deleted or queue exited. */
	if (!list_empty(&alloc_list)) {
		blkg = list_first_entry(&alloc_list, struct blkio_group,
					alloc_node);
		for (i = 0; i < BLKIO_NR_POLICIES; i++) {
			struct blkg_policy_data *pd = blkg->pd[i];

			if (blkio_policy[i] && pd && !pd->stats_cpu)
				swap(pd->stats_cpu, pcpu_stats[i]);
		}

		list_del_init(&blkg->alloc_node);
	}

	empty = list_empty(&alloc_list);

	spin_unlock(&alloc_list_lock);
	spin_unlock_irq(&blkio_list_lock);

	if (!empty)
		goto alloc_stats;
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkio_group *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkg_policy_data *pd = blkg->pd[i];

		if (pd) {
			free_percpu(pd->stats_cpu);
			kfree(pd);
		}
	}

	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
				      struct request_queue *q)
{
	struct blkio_group *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	INIT_LIST_HEAD(&blkg->alloc_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));

	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];
		struct blkg_policy_data *pd;

		if (!pol)
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
				  q->node);
		if (!pd) {
			blkg_free(blkg);
			return NULL;
		}

		blkg->pd[i] = pd;
		pd->blkg = blkg;
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKIO_NR_POLICIES; i++) {
		struct blkio_policy_type *pol = blkio_policy[i];

		if (pol)
			pol->ops.blkio_init_group_fn(blkg);
	}

	return blkg;
}

struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       bool for_root)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct blkio_group *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 * The following can be removed if blkg lookup is guaranteed to
	 * fail on a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)) && !for_root)
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);

	blkg = blkg_lookup(blkcg, q);
	if (blkg)
		return blkg;

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css))
		return ERR_PTR(-EINVAL);

	/* allocate and initialize */
	blkg = blkg_alloc(blkcg, q);

	/* did alloc fail? */
	if (unlikely(!blkg)) {
		blkg = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* insert */
	spin_lock(&blkcg->lock);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	list_add(&blkg->q_node, &q->blkg_list);
	spin_unlock(&blkcg->lock);

	spin_lock(&alloc_list_lock);
	list_add(&blkg->alloc_node, &alloc_list);
	/* Queue per cpu stat allocation from worker thread. */
	queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
	spin_unlock(&alloc_list_lock);
out:
	return blkg;
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

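/*
 * Illustrative caller sketch (added, not part of the original file; it
 * mirrors blkio_policy_parse_and_set() below): both the RCU read lock
 * and the queue lock are held across the call:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q, false);
 *	spin_unlock_irq(q->queue_lock);
 *	if (IS_ERR(blkg))
 *		ret = PTR_ERR(blkg);
 *	rcu_read_unlock();
 */
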
/* called under rcu_read_lock(). */
struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				struct request_queue *q)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkg->q == q)
			return blkg;
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup);

static void blkg_destroy(struct blkio_group *blkg)
{
	struct request_queue *q = blkg->q;
	struct blkio_cgroup *blkcg = blkg->blkcg;

	lockdep_assert_held(q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something is wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	spin_lock(&alloc_list_lock);
	list_del_init(&blkg->alloc_node);
	spin_unlock(&alloc_list_lock);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/*
 * XXX: This updates blkg policy data in-place for root blkg, which is
 * necessary across elevator switch and policy registration as root blkgs
 * aren't shot down. This broken and racy implementation is temporary.
 * Eventually, blkg shoot down will be replaced by proper in-place update.
 */
void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
{
	struct blkio_policy_type *pol = blkio_policy[plid];
	struct blkio_group *blkg = blkg_lookup(&blkio_root_cgroup, q);
	struct blkg_policy_data *pd;

	if (!blkg)
		return;

	kfree(blkg->pd[plid]);
	blkg->pd[plid] = NULL;

	if (!pol)
		return;

	pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
	WARN_ON_ONCE(!pd);

	pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	WARN_ON_ONCE(!pd->stats_cpu);

	blkg->pd[plid] = pd;
	pd->blkg = blkg;
	pol->ops.blkio_init_group_fn(blkg);
}
EXPORT_SYMBOL_GPL(update_root_blkg_pd);

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 * @destroy_root: whether to destroy root blkg or not
 *
 * Destroy blkgs associated with @q. If @destroy_root is %true, all are
 * destroyed; otherwise, root blkg is left alone.
 */
void blkg_destroy_all(struct request_queue *q, bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkio_cgroup *blkcg = blkg->blkcg;

		/* skip root? */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blkg_destroy_all);

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
}

void __blkg_release(struct blkio_group *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in an RCU manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

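/*
 * Illustrative note (added, not part of the original file; assumes the
 * blkg_get()/blkg_put() refcount helpers declared in blk-cgroup.h): the
 * reference taken in blkg_alloc() (refcnt = 1) is dropped by
 * blkg_destroy(); any other holder pairs blkg_get(blkg) with
 * blkg_put(blkg), and the final put reaches __blkg_release() above.
 */
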
static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	int cpu;

	if (pd->stats_cpu == NULL)
		return;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *sc =
			per_cpu_ptr(pd->stats_cpu, cpu);

		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
		blkg_stat_reset(&sc->sectors);
	}
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkio_policy_type *pol;

		list_for_each_entry(pol, &blkio_list, list) {
			struct blkg_policy_data *pd = blkg->pd[pol->plid];
			struct blkio_group_stats *stats = &pd->stats;

			/* queued stats shouldn't be cleared */
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
			blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
			blkg_stat_reset(&stats->unaccounted_time);
			blkg_stat_reset(&stats->avg_queue_size_sum);
			blkg_stat_reset(&stats->avg_queue_size_samples);
			blkg_stat_reset(&stats->dequeue);
			blkg_stat_reset(&stats->group_wait_time);
			blkg_stat_reset(&stats->idle_time);
			blkg_stat_reset(&stats->empty_time);
#endif
			blkio_reset_stats_cpu(blkg, pol->plid);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static void blkio_get_key_name(enum blkg_rwstat_type type, const char *dname,
			       char *str, int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%s", dname);
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKG_RWSTAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKG_RWSTAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKG_RWSTAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKG_RWSTAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKG_RWSTAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

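/*
 * Illustrative note (added, not part of the original file): for a device
 * whose bdi is named "8:16", this yields the key "8:16" when
 * diskname_only is set, and otherwise "8:16 Read", "8:16 Write",
 * "8:16 Sync", "8:16 Async" or "8:16 Total" - the keys visible in map
 * files such as blkio.io_serviced.
 */
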
static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
				    enum stat_type_cpu type,
				    enum blkg_rwstat_type sub_type)
{
	struct blkg_policy_data *pd = blkg->pd[plid];
	u64 val = 0;
	int cpu;

	if (pd->stats_cpu == NULL)
		return val;

	for_each_possible_cpu(cpu) {
		struct blkio_group_stats_cpu *stats_cpu =
			per_cpu_ptr(pd->stats_cpu, cpu);
		struct blkg_rwstat rws;

		switch (type) {
		case BLKIO_STAT_CPU_SECTORS:
			val += blkg_stat_read(&stats_cpu->sectors);
			break;
		case BLKIO_STAT_CPU_SERVICE_BYTES:
			rws = blkg_rwstat_read(&stats_cpu->service_bytes);
			val += rws.cnt[sub_type];
			break;
		case BLKIO_STAT_CPU_SERVICED:
			rws = blkg_rwstat_read(&stats_cpu->serviced);
			val += rws.cnt[sub_type];
			break;
		}
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
				   struct cgroup_map_cb *cb, const char *dname,
				   enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum blkg_rwstat_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, plid, type, 0);
		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, val);
		return val;
	}

	for (sub_type = BLKG_RWSTAT_READ; sub_type < BLKG_RWSTAT_NR;
	     sub_type++) {
		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
				   false);
		val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKG_RWSTAT_READ) +
		blkio_read_stat_cpu(blkg, plid, type, BLKG_RWSTAT_WRITE);

	blkio_get_key_name(BLKG_RWSTAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
			       struct cgroup_map_cb *cb, const char *dname,
			       enum stat_type type)
{
	struct blkio_group_stats *stats = &blkg->pd[plid]->stats;
	uint64_t v = 0, disk_total = 0;
	char key_str[MAX_KEY_LEN];
	struct blkg_rwstat rws = { };
	int st;

	if (type >= BLKIO_STAT_ARR_NR) {
		switch (type) {
		case BLKIO_STAT_TIME:
			v = blkg_stat_read(&stats->time);
			break;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_STAT_UNACCOUNTED_TIME:
			v = blkg_stat_read(&stats->unaccounted_time);
			break;
		case BLKIO_STAT_AVG_QUEUE_SIZE: {
			uint64_t samples;

			samples = blkg_stat_read(&stats->avg_queue_size_samples);
			if (samples) {
				v = blkg_stat_read(&stats->avg_queue_size_sum);
				do_div(v, samples);
			}
			break;
		}
		case BLKIO_STAT_IDLE_TIME:
			v = blkg_stat_read(&stats->idle_time);
			break;
		case BLKIO_STAT_EMPTY_TIME:
			v = blkg_stat_read(&stats->empty_time);
			break;
		case BLKIO_STAT_DEQUEUE:
			v = blkg_stat_read(&stats->dequeue);
			break;
		case BLKIO_STAT_GROUP_WAIT_TIME:
			v = blkg_stat_read(&stats->group_wait_time);
			break;
#endif
		default:
			WARN_ON_ONCE(1);
		}

		blkio_get_key_name(0, dname, key_str, MAX_KEY_LEN, true);
		cb->fill(cb, key_str, v);
		return v;
	}

	switch (type) {
	case BLKIO_STAT_MERGED:
		rws = blkg_rwstat_read(&stats->merged);
		break;
	case BLKIO_STAT_SERVICE_TIME:
		rws = blkg_rwstat_read(&stats->service_time);
		break;
	case BLKIO_STAT_WAIT_TIME:
		rws = blkg_rwstat_read(&stats->wait_time);
		break;
	case BLKIO_STAT_QUEUED:
		rws = blkg_rwstat_read(&stats->queued);
		break;
	default:
		WARN_ON_ONCE(true);
		break;
	}

	for (st = BLKG_RWSTAT_READ; st < BLKG_RWSTAT_NR; st++) {
		blkio_get_key_name(st, dname, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, rws.cnt[st]);
		if (st == BLKG_RWSTAT_READ || st == BLKG_RWSTAT_WRITE)
			disk_total += rws.cnt[st];
	}

	blkio_get_key_name(BLKG_RWSTAT_TOTAL, dname, key_str, MAX_KEY_LEN,
			   false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
				      int fileid, struct blkio_cgroup *blkcg)
{
	struct gendisk *disk = NULL;
	struct blkio_group *blkg = NULL;
	struct blkg_policy_data *pd;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent from inputting too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		goto out;

	rcu_read_lock();

	spin_lock_irq(disk->queue->queue_lock);
	blkg = blkg_lookup_create(blkcg, disk->queue, false);
	spin_unlock_irq(disk->queue->queue_lock);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}

	pd = blkg->pd[plid];

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out_unlock;

		pd->conf.weight = temp;
		blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			pd->conf.bps[READ] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_bps_device:
			pd->conf.bps[WRITE] = temp;
			blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[READ] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out_unlock;
			pd->conf.iops[WRITE] = temp;
			blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out_unlock:
	rcu_read_unlock();
out:
	put_disk(disk);

	/*
	 * If queue was bypassing, we should retry. Do so after a short
	 * msleep(). It isn't strictly necessary but queue can be
	 * bypassing for some time and it's always nice to avoid busy
	 * looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

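/*
 * Illustrative usage (added, not part of the original file): the buffer
 * parsed above has the form "<major>:<minor> <value>", so from a shell:
 *
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 *
 * caps reads on device 8:16 at 1 MB/s, and writing "8:16 0" clears the
 * limit again (a zero value is mapped to "no limit" by the "temp ?: -1"
 * updates above).
 */
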
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
			      const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
	kfree(buf);
	return ret;
}

static const char *blkg_dev_name(struct blkio_group *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
				   struct seq_file *m)
{
	int plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);
	struct blkg_policy_data *pd = blkg->pd[plid];
	const char *dname = blkg_dev_name(blkg);
	int rw = WRITE;

	if (!dname)
		return;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if (pd->conf.weight)
			seq_printf(m, "%s\t%u\n",
				   dname, pd->conf.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_bps_device:
			if (pd->conf.bps[rw])
				seq_printf(m, "%s\t%llu\n",
					   dname, pd->conf.bps[rw]);
			break;
		case BLKIO_THROTL_read_iops_device:
			rw = READ;
			/* fall through */
		case BLKIO_THROTL_write_iops_device:
			if (pd->conf.iops[rw])
				seq_printf(m, "%s\t%u\n",
					   dname, pd->conf.iops[rw]);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
			    struct seq_file *m)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		blkio_print_group_conf(cft, blkg, m);
	spin_unlock_irq(&blkcg->lock);
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_conf(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
				 struct cftype *cft, struct cgroup_map_cb *cb,
				 enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		const char *dname = blkg_dev_name(blkg);
		int plid = BLKIOFILE_POLICY(cft->private);

		if (!dname)
			continue;
		if (pcpu)
			cgroup_total += blkio_get_stat_cpu(blkg, plid,
							   cb, dname, type);
		else
			cgroup_total += blkio_get_stat(blkg, plid,
						       cb, dname, type);
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1, 0);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}

static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct blkg_policy_data *pd = blkg->pd[plid];

		if (!pd->conf.weight)
			blkio_update_group_weight(blkg, plid, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, plid, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

31e4c28d | 1354 | struct cftype blkio_files[] = { |
34d0f179 GJ |
1355 | { |
1356 | .name = "weight_device", | |
062a644d VG |
1357 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1358 | BLKIO_PROP_weight_device), | |
1359 | .read_seq_string = blkiocg_file_read, | |
1360 | .write_string = blkiocg_file_write, | |
34d0f179 GJ |
1361 | .max_write_len = 256, |
1362 | }, | |
31e4c28d VG |
1363 | { |
1364 | .name = "weight", | |
062a644d VG |
1365 | .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP, |
1366 | BLKIO_PROP_weight), | |
1367 | .read_u64 = blkiocg_file_read_u64, | |
1368 | .write_u64 = blkiocg_file_write_u64, | |
31e4c28d | 1369 | }, |
22084190 VG |
1370 | { |
1371 | .name = "time", | |
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
	{ }	/* terminate */
};
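
/*
 * Example usage from userspace (an illustrative sketch, not part of this
 * file): once the blkio controller is mounted, each file above appears
 * per cgroup with a "blkio." prefix.  The mount path and the 8:16 (sdb)
 * device numbers are assumptions; substitute the major:minor pair of
 * your own device.
 *
 *	# cap reads on sdb to 1MB/s for this group
 *	echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *
 *	# read back per-device service statistics
 *	cat /sys/fs/cgroup/blkio/grp1/blkio.io_service_bytes
 */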

/**
 * blkiocg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkiocg_pre_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkio_group *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkio_group, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			/*
			 * Can't grab q lock while holding blkcg lock
			 * (wrong nesting order); drop blkcg lock and retry.
			 */
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);

	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}
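
/*
 * Illustrative trigger (a sketch; the mount path is an assumption):
 * creating a directory under a mounted blkio hierarchy is what ends up
 * invoking blkiocg_create() above, and removing it eventually leads to
 * blkiocg_pre_destroy() followed by blkiocg_destroy():
 *
 *	mkdir /sys/fs/cgroup/blkio/grp1	 # -> blkiocg_create()
 *	rmdir /sys/fs/cgroup/blkio/grp1	 # -> pre_destroy() + destroy()
 */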

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	int ret;

	might_sleep();

	ret = blk_throtl_init(q);
	if (ret)
		return ret;

	mutex_lock(&all_q_mutex);
	INIT_LIST_HEAD(&q->all_q_node);
	list_add_tail(&q->all_q_node, &all_q_list);
	mutex_unlock(&all_q_mutex);

	return 0;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	mutex_lock(&all_q_mutex);
	list_del_init(&q->all_q_node);
	mutex_unlock(&all_q_mutex);

	blkg_destroy_all(q, true);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}
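
/*
 * Illustrative failure mode (a sketch, not kernel code): threads spawned
 * with clone(CLONE_IO) share a single io_context, so ioc->nr_tasks > 1
 * and an attempt to move just one of them into another blkio cgroup is
 * rejected through the can_attach check above:
 *
 *	clone(child_fn, stack, CLONE_IO | SIGCHLD, NULL);
 *	// echo <child_tid> > /sys/fs/cgroup/blkio/grp1/tasks  => EINVAL
 */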

/*
 * Put every known request_queue into bypass mode and shoot down the
 * non-root blkgs so blkcg policies can be (un)registered safely.  Holds
 * all_q_mutex until the matching blkcg_bypass_end().
 */
static void blkcg_bypass_start(void)
	__acquires(&all_q_mutex)
{
	struct request_queue *q;

	mutex_lock(&all_q_mutex);

	list_for_each_entry(q, &all_q_list, all_q_node) {
		blk_queue_bypass_start(q);
		blkg_destroy_all(q, false);
	}
}

static void blkcg_bypass_end(void)
	__releases(&all_q_mutex)
{
	struct request_queue *q;

	list_for_each_entry(q, &all_q_list, all_q_node)
		blk_queue_bypass_end(q);

	mutex_unlock(&all_q_mutex);
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.pre_destroy = blkiocg_pre_destroy,
	.destroy = blkiocg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkio_files,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/*
 * Register @blkiop: all queues are bypassed while the policy is added to
 * the global list, then each queue's root group gets its policy data
 * refreshed via update_root_blkg_pd().
 */
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid]);
	blkio_policy[blkiop->plid] = blkiop;
	list_add_tail(&blkiop->list, &blkio_list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_register);
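
/*
 * Registration sketch (hypothetical policy; the "foo" names are
 * assumptions, not part of this file).  A policy module fills in a
 * blkio_policy_type and registers it, typically from its init hook:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_update_group_weight_fn = foo_update_weight,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	// module init
 *	...
 *	blkio_policy_unregister(&blkio_policy_foo);	// module exit
 */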

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	struct request_queue *q;

	blkcg_bypass_start();
	spin_lock(&blkio_list_lock);

	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
	blkio_policy[blkiop->plid] = NULL;
	list_del_init(&blkiop->list);

	spin_unlock(&blkio_list_lock);
	list_for_each_entry(q, &all_q_list, all_q_node)
		update_root_blkg_pd(q, blkiop->plid);
	blkcg_bypass_end();
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);