blkcg: restructure blkg_policy_data allocation in blkcg_activate_policy()
include/linux/blk-cgroup.h

#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state css;
        spinlock_t lock;

        struct radix_tree_root blkg_tree;
        struct blkcg_gq *blkg_hint;
        struct hlist_head blkg_list;

        struct blkcg_policy_data *pd[BLKCG_MAX_POLS];

        struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
#endif
};

struct blkg_stat {
        struct u64_stats_sync syncp;
        uint64_t cnt;
};

struct blkg_rwstat {
        struct u64_stats_sync syncp;
        uint64_t cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than sizeof(pd).
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq *blkg;
        int plid;
};

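/*
 * Illustrative sketch (not part of this header; the "foo" names are
 * assumptions for illustration): a policy embeds pd at the start of its
 * per-blkg data and sizes it via pd_size so the core allocates enough
 * room:
 *
 *	struct foo_grp {
 *		struct blkg_policy_data pd;	// must come first
 *		u64 weight;
 *	};
 *
 * with .pd_size = sizeof(struct foo_grp) in the matching blkcg_policy.
 */
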
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it must specify its size with the
 * cpd_size field of the blkcg_policy structure and embed a
 * blkcg_policy_data in it.  cpd_init() is invoked to let each policy
 * handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the policy id this per-policy data belongs to */
        int plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue *q;
        struct list_head q_node;
        struct hlist_node blkcg_node;
        struct blkcg *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list rl;

        /* reference count */
        atomic_t refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool online;

        struct blkg_policy_data *pd[BLKCG_MAX_POLS];

        struct rcu_head rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
        int plid;
        /* policy specific private data size */
        size_t pd_size;
        /* policy specific per-blkcg data size */
        size_t cpd_size;
        /* cgroup files for the policy */
        struct cftype *cftypes;

        /* operations */
        blkcg_pol_init_cpd_fn *cpd_init_fn;
        blkcg_pol_init_pd_fn *pd_init_fn;
        blkcg_pol_online_pd_fn *pd_online_fn;
        blkcg_pol_offline_pd_fn *pd_offline_fn;
        blkcg_pol_exit_pd_fn *pd_exit_fn;
        blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};

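/*
 * Illustrative sketch (assumed names, not part of this header): a minimal
 * policy definition for the hypothetical "foo" policy above, with
 * placeholder foo_pd_init() and foo_cftypes.  plid is left unset; it is
 * assigned by the core at registration time:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_grp),
 *		.cftypes	= foo_cftypes,
 *		.pd_init_fn	= foo_pd_init,
 *	};
 */
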
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);

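/*
 * Illustrative registration/activation flow (a sketch; error handling
 * elided, blkcg_policy_foo as in the sketch above):
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);	  // module init
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo); // per queue
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 *	blkcg_policy_unregister(&blkcg_policy_foo);	  // module exit
 */
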
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                                             int off);

struct blkg_conf_ctx {
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        u64 v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

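/*
 * Illustrative usage sketch (assumed cgroup write-handler context;
 * foo_apply_limit() is a placeholder): parse a "MAJ:MIN VAL" style config
 * string, apply it to the target blkg, then drop the reference:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, input, &ctx);
 *	if (ret)
 *		return ret;
 *	foo_apply_limit(ctx.blkg, ctx.v);	// ctx.blkg, ctx.v now valid
 *	blkg_conf_finish(&ctx);
 */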

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

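/*
 * Illustrative sketch: policies typically wrap these accessors with
 * container_of() to recover their own type (hypothetical "foo" again):
 *
 *	static struct foo_grp *pd_to_fg(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 *
 *	static struct foo_grp *blkg_to_fg(struct blkcg_gq *blkg)
 *	{
 *		return pd_to_fg(blkg_to_pd(blkg, &blkcg_policy_foo));
 *	}
 */
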
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        char *p;

        p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        if (!p) {
                strncpy(buf, "<unavailable>", buflen);
                return -ENAMETOOLONG;
        }

        memmove(buf, p, buf + buflen - p);
        return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
                               bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

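/*
 * Illustrative walk (a sketch; assumes rcu_read_lock() is held across the
 * iteration as required above, p_blkg is the parent blkg of interest, and
 * foo_update() is a placeholder):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
 *		foo_update(blkg_to_fg(blkg));
 *	rcu_read_unlock();
 */
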
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup_create(blkcg, q);
        if (unlikely(IS_ERR(blkg)))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}

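/*
 * Illustrative allocation-path pairing (a sketch of how the block core is
 * expected to use these helpers; queue_lock held as documented):
 *
 *	rl = blk_get_rl(q, bio);	// takes a ref, never NULL
 *	rq = ...allocate a request from rl...;
 *	if (!rq)
 *		blk_put_rl(rl);		// drop the ref on failure
 *	else
 *		blk_rq_set_rl(rq, rl);	// blk_rq_rl(rq) finds it at free
 */
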
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
        u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        u64_stats_update_begin(&stat->syncp);
        stat->cnt += val;
        u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        unsigned int start;
        uint64_t v;

        do {
                start = u64_stats_fetch_begin_irq(&stat->syncp);
                v = stat->cnt;
        } while (u64_stats_fetch_retry_irq(&stat->syncp, start));

        return v;
}

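/*
 * Illustrative sketch ("serviced" is a hypothetical blkg_stat field in a
 * policy's per-blkg data): bump on the caller-synchronized hot path, read
 * out lock-free elsewhere:
 *
 *	blkg_stat_add(&fg->serviced, 1);
 *	...
 *	u64 total = blkg_stat_read(&fg->serviced);
 */
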
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
        blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
        u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   int rw, uint64_t val)
{
        u64_stats_update_begin(&rwstat->syncp);

        if (rw & REQ_WRITE)
                rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_READ] += val;
        if (rw & REQ_SYNC)
                rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

        u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        unsigned int start;
        struct blkg_rwstat tmp;

        do {
                start = u64_stats_fetch_begin_irq(&rwstat->syncp);
                tmp = *rwstat;
        } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

        return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

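/*
 * Illustrative sketch ("served" is a hypothetical blkg_rwstat field):
 * account a completed bio by direction and sync-ness, then read back a
 * consistent snapshot and the read+write total:
 *
 *	blkg_rwstat_add(&fg->served, bio->bi_rw, bio->bi_iter.bi_size);
 *	...
 *	struct blkg_rwstat snap = blkg_rwstat_read(&fg->served);
 *	u64 bytes = blkg_rwstat_total(&fg->served);
 */
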
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
                                     struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        u64_stats_update_begin(&to->syncp);
        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                to->cnt[i] += v.cnt[i];
        u64_stats_update_end(&to->syncp);
}

587
588 #else /* CONFIG_BLK_CGROUP */
589
590 struct blkcg {
591 };
592
593 struct blkg_policy_data {
594 };
595
596 struct blkcg_policy_data {
597 };
598
599 struct blkcg_gq {
600 };
601
602 struct blkcg_policy {
603 };
604
605 #define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
606
607 static inline struct cgroup_subsys_state *
608 task_get_blkcg_css(struct task_struct *task)
609 {
610 return NULL;
611 }
612
613 #ifdef CONFIG_BLOCK
614
615 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
616 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
617 static inline void blkcg_drain_queue(struct request_queue *q) { }
618 static inline void blkcg_exit_queue(struct request_queue *q) { }
619 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
620 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
621 static inline int blkcg_activate_policy(struct request_queue *q,
622 const struct blkcg_policy *pol) { return 0; }
623 static inline void blkcg_deactivate_policy(struct request_queue *q,
624 const struct blkcg_policy *pol) { }
625
626 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
627
628 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
629 struct blkcg_policy *pol) { return NULL; }
630 static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
631 static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
632 static inline void blkg_get(struct blkcg_gq *blkg) { }
633 static inline void blkg_put(struct blkcg_gq *blkg) { }
634
635 static inline struct request_list *blk_get_rl(struct request_queue *q,
636 struct bio *bio) { return &q->root_rl; }
637 static inline void blk_put_rl(struct request_list *rl) { }
638 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
639 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
640
641 #define blk_queue_for_each_rl(rl, q) \
642 for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
643
644 #endif /* CONFIG_BLOCK */
645 #endif /* CONFIG_BLK_CGROUP */
646 #endif /* _BLK_CGROUP_H */