cgroup: make hierarchy iterators deal with cgroup_subsys_state instead of cgroup
block/blk-cgroup.h
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};
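
/*
 * Example (illustrative sketch, not part of this header): a policy's
 * private data embeds struct blkg_policy_data first, so the two can be
 * converted with container_of().  The "foo" names are hypothetical.
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;	// must be first
 *		u64			bytes_serviced;
 *	};
 *
 *	static struct foo_group *pd_to_fg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *	}
 */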

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
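
/*
 * Example (hedged sketch): a minimal policy definition.  plid is assigned
 * by blkcg_policy_register(); a policy only fills in pd_size and the
 * methods it needs.  The "foo" identifiers continue the hypothetical
 * policy from the previous example.
 *
 *	static void foo_pd_init(struct blkcg_gq *blkg)
 *	{
 *		struct foo_group *fg =
 *			pd_to_fg(blkg_to_pd(blkg, &blkcg_policy_foo));
 *
 *		fg->bytes_serviced = 0;
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group),
 *		.pd_init_fn	= foo_pd_init,
 *	};
 */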

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
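
/*
 * Example (hedged sketch): registration usually happens once at module
 * init, while activation is per request_queue, typically when the policy
 * is first used on that queue.  Error handling kept minimal; "foo" names
 * hypothetical.
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 */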

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
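
/*
 * Example (hedged sketch): a policy prints one line per blkg by passing a
 * prfill callback to blkcg_print_blkgs() from its seq_file show method.
 * The off-based helpers format a blkg_stat/blkg_rwstat located @off bytes
 * into the policy's private data.  "foo" names hypothetical.
 *
 *	static u64 foo_prfill_bytes(struct seq_file *sf,
 *				    struct blkg_policy_data *pd, int off)
 *	{
 *		struct foo_group *fg = pd_to_fg(pd);
 *
 *		return __blkg_prfill_u64(sf, pd, fg->bytes_serviced);
 *	}
 *
 *	// in the show method:
 *	blkcg_print_blkgs(sf, blkcg, foo_prfill_bytes, &blkcg_policy_foo,
 *			  0, false);
 */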

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

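/*
 * Example (hedged sketch): handling a typical "MAJ:MIN VAL" configuration
 * write.  blkg_conf_prep() parses @input, looks up or creates the blkg
 * and, on success, returns with the relevant locks held;
 * blkg_conf_finish() drops them.  The "limit" field and "foo" names are
 * hypothetical.
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	pd_to_fg(blkg_to_pd(ctx.blkg, &blkcg_policy_foo))->limit = ctx.v;
 *
 *	blkg_conf_finish(&ctx);
 *	return 0;
 */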

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(css_parent(&blkcg->css));
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
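
/*
 * Example (hedged sketch): policies conventionally wrap the two
 * conversions above into a single typed helper, continuing the
 * hypothetical "foo" policy from the earlier examples.
 *
 *	static struct foo_group *blkg_to_fg(struct blkcg_gq *blkg)
 *	{
 *		return pd_to_fg(blkg_to_pd(blkg, &blkcg_policy_foo));
 *	}
 */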

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
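
/*
 * Example (hedged sketch): taking an extra reference for work that
 * outlives the current queue_lock section.  Both operations must run
 * under the blkg's queue_lock; the final put frees the blkg via RCU.
 *
 *	// queue_lock held, caller already owns a reference:
 *	blkg_get(blkg);
 *	...
 *	spin_lock_irq(blkg->q->queue_lock);
 *	blkg_put(blkg);
 *	spin_unlock_irq(blkg->q->queue_lock);
 */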

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
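
/*
 * Example (hedged sketch): summing a counter over all online descendants
 * of @blkg under RCU, using the hypothetical "foo" helpers from the
 * earlier examples.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *	u64 sum = 0;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg)
 *		sum += blkg_to_fg(d_blkg)->bytes_serviced;
 *	rcu_read_unlock();
 */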

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
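
/*
 * Example (hedged sketch): the allocation-side pairing used by the block
 * core.  Under queue_lock, pick the rl for @bio, tag the request with it,
 * and drop the reference when the request is freed.
 *
 *	// at request allocation, queue_lock held:
 *	struct request_list *rl = blk_get_rl(q, bio);
 *	blk_rq_set_rl(rq, rl);
 *
 *	// at request free, queue_lock held:
 *	blk_put_rl(blk_rq_rl(rq));
 */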

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
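
/*
 * Example (hedged sketch): waking sleepers on every request_list of a
 * queue, a pattern similar to what the block core does when draining.
 *
 *	struct request_list *rl;
 *
 *	spin_lock_irq(q->queue_lock);
 *	blk_queue_for_each_rl(rl, q) {
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 *		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 *	}
 *	spin_unlock_irq(q->queue_lock);
 */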

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
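
/*
 * Example (hedged sketch): a single serialized updater and lock-free
 * readers.  On 32-bit SMP the seqcount in u64_stats_sync makes the 64-bit
 * read tear-free; on 64-bit it compiles away.  "fg->serviced" is a
 * hypothetical blkg_stat in the policy data from the earlier examples.
 *
 *	// updater, serialized by the caller (e.g. under queue_lock):
 *	blkg_stat_add(&fg->serviced, 1);
 *
 *	// reader, any context:
 *	u64 v = blkg_stat_read(&fg->serviced);
 */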

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
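
/*
 * Example (hedged sketch): accounting a completed bio by direction and
 * sync-ness using the bio's rw flags.  "fg" and its blkg_rwstat fields
 * are the hypothetical policy data from the earlier examples.
 *
 *	blkg_rwstat_add(&fg->service_bytes, bio->bi_rw, bio->bi_size);
 *	blkg_rwstat_add(&fg->serviced, bio->bi_rw, 1);
 */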

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */