#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state css;
        spinlock_t lock;

        struct radix_tree_root blkg_tree;
        struct blkcg_gq *blkg_hint;
        struct hlist_head blkg_list;

        struct blkcg_policy_data *pd[BLKCG_MAX_POLS];

        struct list_head all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
#endif
};

struct blkg_stat {
        struct u64_stats_sync syncp;
        uint64_t cnt;
};

struct blkg_rwstat {
        struct u64_stats_sync syncp;
        uint64_t cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  It is used by blkcg policies which need to track
 * information per blkcg-q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq *blkg;
        int plid;

        /* used during policy activation */
        struct list_head alloc_node;
};

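/*
 * Purely for illustration, a policy's per-blkg data might look like the
 * following sketch (the foo_* names are hypothetical, not part of this
 * header):
 *
 *        struct foo_blkg_data {
 *                struct blkg_policy_data pd;     // must come first
 *                u64 foo_bytes_dispatched;
 *        };
 *
 * with .pd_size = sizeof(struct foo_blkg_data) in the matching
 * blkcg_policy, so that blkcg core allocates room for the whole structure
 * and pd_init_fn() can recover it via
 * container_of(pd, struct foo_blkg_data, pd).
 */
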
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it must declare that data's size in the
 * cpd_size field of the blkcg_policy structure and embed a struct
 * blkcg_policy_data in it.  cpd_init() is invoked to let each policy
 * initialize its per-blkcg data.
 */
struct blkcg_policy_data {
        /* the policy id this per-policy data belongs to */
        int plid;
};

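/*
 * Again illustrative only (hypothetical foo_* names): per-blkcg data for
 * a policy would embed the structure above and size itself accordingly:
 *
 *        struct foo_blkcg_data {
 *                struct blkcg_policy_data cpd;
 *                unsigned int foo_weight;
 *        };
 *
 * with .cpd_size = sizeof(struct foo_blkcg_data), letting cpd_init_fn()
 * reach the full structure through blkcg->pd[plid].
 */
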
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue *q;
        struct list_head q_node;
        struct hlist_node blkcg_node;
        struct blkcg *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list rl;

        /* reference count */
        atomic_t refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool online;

        struct blkg_policy_data *pd[BLKCG_MAX_POLS];

        struct rcu_head rcu_head;
};

typedef void (blkcg_pol_init_cpd_fn)(const struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
        int plid;
        /* policy specific private data size */
        size_t pd_size;
        /* policy specific per-blkcg data size */
        size_t cpd_size;
        /* cgroup files for the policy */
        struct cftype *cftypes;

        /* operations */
        blkcg_pol_init_cpd_fn *cpd_init_fn;
        blkcg_pol_init_pd_fn *pd_init_fn;
        blkcg_pol_online_pd_fn *pd_online_fn;
        blkcg_pol_offline_pd_fn *pd_offline_fn;
        blkcg_pol_exit_pd_fn *pd_exit_fn;
        blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};

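/*
 * A minimal registration could look like the following sketch; the foo_*
 * identifiers are hypothetical and shown only to connect the fields above
 * to the registration functions declared below:
 *
 *        static void foo_pd_init(struct blkcg_gq *blkg) { ... }
 *
 *        static struct blkcg_policy blkcg_policy_foo = {
 *                .pd_size    = sizeof(struct foo_blkg_data),
 *                .cpd_size   = sizeof(struct foo_blkcg_data),
 *                .cftypes    = foo_files,
 *                .pd_init_fn = foo_pd_init,
 *        };
 *
 *        static int __init foo_init(void)
 *        {
 *                return blkcg_policy_register(&blkcg_policy_foo);
 *        }
 *
 * plid is assigned by blkcg_policy_register(); the policy then takes
 * effect on a given queue only after blkcg_activate_policy().
 */
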
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                                             int off);

struct blkg_conf_ctx {
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        u64 v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);


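/*
 * Typical use, sketched, from a policy's cftype write handler (the foo_*
 * names are hypothetical; the handler signature is elided):
 *
 *        struct blkg_conf_ctx ctx;
 *        int ret;
 *
 *        ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *        if (ret)
 *                return ret;
 *        // ctx.blkg and ctx.v identify the target blkg and the value
 *        ...
 *        blkg_conf_finish(&ctx);
 *
 * blkg_conf_prep() parses "MAJ:MIN VAL" style input, looks up or creates
 * the blkg, and on success returns with locks held which
 * blkg_conf_finish() releases.
 */
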
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        char *p;

        p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        if (!p) {
                strncpy(buf, "<unavailable>", buflen);
                return -ENAMETOOLONG;
        }

        memmove(buf, p, buf + buflen - p);
        return 0;
}

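/*
 * Sketch of use, e.g. for a debug message (the buffer size is the
 * caller's choice):
 *
 *        char path[128];
 *
 *        if (!blkg_path(blkg, path, sizeof(path)))
 *                pr_debug("blkg %s\n", path);
 *
 * Note that cgroup_path() builds the path at the end of @buf, which is
 * why blkg_path() memmove()s it to the front before returning.
 */
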
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
                               bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

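/*
 * Illustrative only — summing a hypothetical per-blkg counter over a
 * subtree with the pre-order walk (foo_read_counter() is not a real
 * helper):
 *
 *        struct cgroup_subsys_state *pos_css;
 *        struct blkcg_gq *d_blkg;
 *        u64 sum = 0;
 *
 *        rcu_read_lock();
 *        blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *                sum += foo_read_counter(d_blkg);
 *        rcu_read_unlock();
 *
 * This is essentially the pattern blkg_stat_recursive_sum() implements
 * for blkg_stat counters.
 */
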
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup_create(blkcg, q);
        if (unlikely(IS_ERR(blkg)))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        /* root_rl may not have blkg set */
        if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}

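/*
 * The intended lifecycle, sketched (this mirrors how the block core's
 * request allocation path uses these helpers; not a new API):
 *
 *        rl = blk_get_rl(q, bio);        // under queue_lock
 *        rq = ...allocate from rl...
 *        blk_rq_set_rl(rq, rl);
 *        ...
 *        blk_put_rl(blk_rq_rl(rq));      // when the request is freed
 */
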
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
        u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        u64_stats_update_begin(&stat->syncp);
        stat->cnt += val;
        u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        unsigned int start;
        uint64_t v;

        do {
                start = u64_stats_fetch_begin_irq(&stat->syncp);
                v = stat->cnt;
        } while (u64_stats_fetch_retry_irq(&stat->syncp, start));

        return v;
}

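/*
 * Usage sketch: a writer (serialized by the caller, e.g. under the queue
 * lock) accumulates while readers sample lock-free.  foo_pd and the field
 * name are hypothetical:
 *
 *        blkg_stat_add(&foo_pd->time_stat, delta);        // writer
 *        total = blkg_stat_read(&foo_pd->time_stat);      // any reader
 *
 * u64_stats_sync exists to make the 64-bit read atomic on 32-bit SMP; on
 * 64-bit kernels it compiles away.
 */
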
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
        blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
        u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   int rw, uint64_t val)
{
        u64_stats_update_begin(&rwstat->syncp);

        if (rw & REQ_WRITE)
                rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_READ] += val;
        if (rw & REQ_SYNC)
                rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

        u64_stats_update_end(&rwstat->syncp);
}

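/*
 * E.g. accounting a completed bio (sketch; "serviced" is a hypothetical
 * blkg_rwstat member of a policy's pd):
 *
 *        blkg_rwstat_add(&foo_pd->serviced, bio->bi_rw,
 *                        bio->bi_iter.bi_size);
 *
 * A write bio bumps WRITE (otherwise READ), and independently SYNC or
 * ASYNC, so each value is counted exactly once per axis.
 */
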
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        unsigned int start;
        struct blkg_rwstat tmp;

        do {
                start = u64_stats_fetch_begin_irq(&rwstat->syncp);
                tmp = *rwstat;
        } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

        return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
                                     struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        u64_stats_update_begin(&to->syncp);
        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                to->cnt[i] += v.cnt[i];
        u64_stats_update_end(&to->syncp);
}

#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */