#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL	= BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

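/*
 * For illustration only: a policy's private data might embed pd as in the
 * hypothetical sketch below ("example_grp" and its fields are made-up
 * names, not part of this interface):
 *
 *	struct example_grp {
 *		struct blkg_policy_data	pd;	// must be the first member
 *		u64			bytes_dispatched;
 *	};
 *
 * The policy would then set .pd_size = sizeof(struct example_grp) so the
 * blkcg core allocates the whole structure with pd at the beginning.
 */
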
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

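/*
 * A minimal sketch of how a policy might fill this in, continuing the
 * hypothetical "example" policy from above (only the fields a policy
 * cares about need setting):
 *
 *	static struct blkcg_policy blkcg_policy_example = {
 *		.pd_size	= sizeof(struct example_grp),
 *		.cftypes	= example_files,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_exit_fn	= example_pd_exit,
 *	};
 */
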
extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

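/*
 * Typical lifecycle, sketched with the hypothetical example policy from
 * above: register once at module init, then activate per request_queue
 * before touching per-blkg data.
 *
 *	ret = blkcg_policy_register(&blkcg_policy_example);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_example);
 *
 * Deactivation and unregistration mirror this in reverse order.
 */
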
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

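/*
 * A policy would typically implement a cgroup stat file on top of these;
 * a hedged sketch ("example_print_stat" is a hypothetical read handler,
 * with the stat's offset stashed in cft->private):
 *
 *	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), blkg_prfill_stat,
 *				  &blkcg_policy_example, cft->private, false);
 *		return 0;
 *	}
 */
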
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

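/*
 * Configuration writes usually pair the two helpers; a sketch with error
 * handling trimmed (the value application step is policy specific):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, input, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg and ctx.v now identify the target blkg and new value
 *	blkg_conf_finish(&ctx);
 */
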
static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	struct cgroup *pcg = blkcg->css.cgroup->parent;

	return pcg ? cgroup_to_blkcg(pcg) : NULL;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

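/*
 * Policies usually wrap these converters for their own type; e.g. the
 * hypothetical example policy could recover its private structure like
 * this (NULL-safe, since blkg_to_pd() may return NULL):
 *
 *	static struct example_grp *blkg_to_example(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd =
 *			blkg_to_pd(blkg, &blkcg_policy_example);
 *
 *		return pd ? container_of(pd, struct example_grp, pd) : NULL;
 *	}
 */
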
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

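/*
 * The two helpers bracket the life of a request; roughly what the request
 * allocation path does (a sketch for orientation, not new API):
 *
 *	spin_lock_irq(q->queue_lock);
 *	rl = blk_get_rl(q, bio);	// reference obtained
 *	... allocate the request from rl ...
 *	... and when the request is freed: blk_put_rl(rl);
 */
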
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

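/*
 * For example, queue draining code can walk every request_list of a queue
 * and wake up waiters (sketch; assumes queue_lock is held as noted above):
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 */
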
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

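/*
 * Illustrative update/read pairing ("nr_dispatched_stat" is a hypothetical
 * struct blkg_stat member; the writer serializes itself, e.g. under
 * queue_lock, while readers need no locking):
 *
 *	blkg_stat_add(&eg->nr_dispatched_stat, 1);		// writer side
 *	...
 *	u64 total = blkg_stat_read(&eg->nr_dispatched_stat);	// any context
 */
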
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

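/*
 * Callers usually pass the request's or bio's flag word straight through
 * as @rw; e.g. to account one dispatched request ("stats.serviced" being a
 * hypothetical blkg_rwstat member of the example policy's private data):
 *
 *	blkg_rwstat_add(&eg->stats.serviced, rq->cmd_flags, 1);
 */
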
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */