/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>
#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);
static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
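/*
 * Example (illustrative, not part of the original source): packing and
 * unpacking cft->private. The policy id lives in the high 16 bits, the
 * per-file attribute in the low 16 bits:
 *
 *	int priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				     BLKIO_THROTL_read_bps_device);
 *	BLKIOFILE_POLICY(priv) == BLKIO_POLICY_THROTL
 *	BLKIOFILE_ATTR(priv)   == BLKIO_THROTL_read_bps_device
 */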
struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);
static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}
static inline bool cftype_blkg_same_policy(struct cftype *cft,
					   struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}
/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
				     struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}
/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}
/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
		enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}
struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg, weight);
	}
}
static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
					  int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg, bps);
	}
}
static inline void blkio_update_group_iops(struct blkio_group *blkg,
					   unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg, iops);
	}
}
/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}
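/*
 * A single request is thus accounted on two axes: once as Read or Write
 * and once as Sync or Async, so Read + Write and Sync + Async always add
 * up to the same total for any stat array updated through this helper.
 */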
/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}
/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
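/*
 * Only the running sum and the sample count are stored here; the actual
 * average (sum / samples) is computed lazily when the avg_queue_size
 * file is read, in blkio_get_stat() below.
 */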
void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * The group may already be marked empty. This can happen if a cfqq
	 * got a new request in the parent group and moved to this group
	 * while being added to the service tree. Just ignore the event and
	 * move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction,
		bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->sectors += bytes >> 9;	/* bytes to 512-byte sectors */
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction,
			sync);
	blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes,
			direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
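/*
 * The two deltas above split a request's life into two phases:
 *
 *	start_time ......... io_start_time ........... now
 *	     |<-- wait_time -->|<-- service_time -->|
 *
 * i.e. wait_time covers queuing (arrival to dispatch) and service_time
 * covers the device (dispatch to completion).
 */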
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction,
			sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}
/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		/* queued stats must survive the reset */
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		/* Restore the in-flight state flags cleared by the memset */
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);
	}
	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}
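/*
 * For a device with major:minor 8:16 this produces keys such as
 * "8:16 Read" or "8:16 Total", and just "8:16" when diskname_only is
 * set (device numbers here are illustrative).
 */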
static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}
/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
	if (type == BLKIO_STAT_SECTORS)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.sectors, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
static int blkio_check_dev_num(dev_t dev)
{
	int part = 0;
	struct gendisk *disk;

	disk = get_gendisk(dev, &part);
	if (!disk || part)
		return -ENODEV;

	return 0;
}
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	int ret;
	unsigned long major, minor, temp, iops;
	int i = 0;
	dev_t dev;
	u64 bps;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many arguments */
		if (i == 3)
			break;
	}

	if (i != 2)
		return -EINVAL;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;

	minor_s = s[0];
	if (!minor_s)
		return -EINVAL;

	ret = strict_strtoul(major_s, 10, &major);
	if (ret)
		return -EINVAL;

	ret = strict_strtoul(minor_s, 10, &minor);
	if (ret)
		return -EINVAL;

	dev = MKDEV(major, minor);

	ret = blkio_check_dev_num(dev);
	if (ret)
		return ret;

	newpn->dev = dev;

	if (s[1] == NULL)
		return -EINVAL;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		ret = strict_strtoul(s[1], 10, &temp);
		if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
			temp > BLKIO_WEIGHT_MAX)
			return -EINVAL;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			ret = strict_strtoull(s[1], 10, &bps);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			ret = strict_strtoul(s[1], 10, &iops);
			if (ret)
				return -EINVAL;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = iops;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
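/*
 * The accepted input format is therefore "<major>:<minor> <value>",
 * e.g. "8:16 1048576" for a bps rule (device and value here are
 * illustrative only).
 */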
unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		return pn->val.weight;
	else
		return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);
uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}
uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		return pn->val.bps;
	else
		return -1;
}
unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}
unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;

	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		return pn->val.iops;
	else
		return -1;
}
/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
			break;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}
/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
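/*
 * Note the lock ordering here: blkio_list_lock is always taken before
 * blkcg->lock, both above and in blkio_weight_write() below.
 */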
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}
static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}
/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}
static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			spin_lock_irq(&blkg->stats_lock);
			cgroup_total += blkio_get_stat(blkg, cb, blkg->dev,
						type);
			spin_unlock_irq(&blkg->stats_lock);
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}
/* All map-type cgroup files get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SECTORS, 0);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_MERGED, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_AVG_QUEUE_SIZE, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_GROUP_WAIT_TIME, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_BYTES, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICED, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		/* A per-device weight rule overrides the cgroup default */
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}
static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}
struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
#endif
};
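/*
 * Example usage from userspace (illustrative; assumes the blkio
 * controller is mounted at /sys/fs/cgroup/blkio and the target device
 * has major:minor 8:16):
 *
 *	# give a group a proportional weight of 500
 *	echo 500 > /sys/fs/cgroup/blkio/grp1/blkio.weight
 *
 *	# limit reads from 8:16 to 1 MB/s for that group
 *	echo "8:16 1048576" > \
 *		/sys/fs/cgroup/blkio/grp1/blkio.throttle.read_bps_device
 *
 *	# a value of 0 deletes the per-device rule again
 *	echo "8:16 0" > \
 *		/sys/fs/cgroup/blkio/grp1/blkio.throttle.read_bps_device
 */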
static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}
static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event. Currently this is static call to one io
		 * controlling policy. Once we have more policies in place, we
		 * need some dynamic registration of callback function.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchy deeper than two levels (0,1) */
	if (parent != cgroup->top_cgroup)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}
/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
				struct cgroup *cgroup, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
				struct cgroup *prev, struct task_struct *tsk,
				bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);
static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");