blkcg: let blkcg core manage per-queue blkg list and counter
block/blk-cgroup.h
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>

enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */

	BLKIO_NR_POLICIES,
};

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	BLKIO_STAT_UNACCOUNTED_TIME,
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};

/* Per cpu stats */
enum stat_type_cpu {
	BLKIO_STAT_CPU_SECTORS,
	/* Total bytes transferred */
	BLKIO_STAT_CPU_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_CPU_SERVICED,
	/* Number of IOs merged */
	BLKIO_STAT_CPU_MERGED,
	BLKIO_STAT_CPU_NR
};

enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL
};

/* blkg state flags */
enum blkg_state_flags {
	BLKG_waiting = 0,
	BLKG_idling,
	BLKG_empty,
};

/* cgroup files owned by proportional weight policy */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_unaccounted_time,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};

/* cgroup files owned by throttle policy */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};

struct blkio_cgroup {
	struct cgroup_subsys_state css;
	unsigned int weight;
	spinlock_t lock;
	struct hlist_head blkg_list;
};

struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Time not charged to this cgroup */
	uint64_t unaccounted_time;

	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;

	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;
	uint64_t start_group_wait_time;

	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	uint64_t start_idle_time;
	/*
	 * Total time when we have requests queued and do not contain the
	 * current active queue.
	 */
	uint64_t empty_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif
};
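
/*
 * stat_arr[] is indexed by enum stat_type in the first dimension (only
 * the array-valued stats up to and including BLKIO_STAT_QUEUED) and by
 * enum stat_sub_type in the second.  For example,
 *
 *	stats->stat_arr[BLKIO_STAT_WAIT_TIME][BLKIO_STAT_READ]
 *
 * is the total time read IOs from this group spent waiting in the
 * scheduler queue.
 */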

/* Per cpu blkio group stats */
struct blkio_group_stats_cpu {
	uint64_t sectors;
	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
	struct u64_stats_sync syncp;
};
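
/*
 * Readers of the per cpu stats sum the counters across CPUs, using the
 * u64_stats_sync sequence counter to obtain a consistent 64bit value on
 * 32bit machines.  A minimal sketch of such a reader (names other than
 * the u64_stats_* helpers are illustrative; @pd is assumed to be the
 * blkg_policy_data of interest):
 *
 *	u64 total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		struct blkio_group_stats_cpu *sc =
 *			per_cpu_ptr(pd->stats_cpu, cpu);
 *		unsigned int start;
 *		u64 v;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&sc->syncp);
 *			v = sc->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES]
 *					    [BLKIO_STAT_TOTAL];
 *		} while (u64_stats_fetch_retry(&sc->syncp, start));
 *		total += v;
 *	}
 */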

struct blkio_group_conf {
	unsigned int weight;
	unsigned int iops[2];
	u64 bps[2];
};

/* per-blkg per-policy data */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkio_group *blkg;

	/* Configuration */
	struct blkio_group_conf conf;

	struct blkio_group_stats stats;
	/* Per cpu stats pointer */
	struct blkio_group_stats_cpu __percpu *stats_cpu;

	/* pol->pdata_size bytes of private data used by policy impl */
	char pdata[] __aligned(__alignof__(unsigned long long));
};

struct blkio_group {
	/* Pointer to the associated request_queue, RCU protected */
	struct request_queue __rcu *q;
	struct list_head q_node[BLKIO_NR_POLICIES];
	struct hlist_node blkcg_node;
	struct blkio_cgroup *blkcg;
	/* Store cgroup path */
	char path[128];
	/* policy which owns this blk group */
	enum blkio_policy_id plid;
	/* reference count */
	int refcnt;

	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkg_policy_data *pd[BLKIO_NR_POLICIES];

	struct rcu_head rcu_head;
};
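
/*
 * A blkg is linked into two kinds of lists: q_node[] puts it on the
 * owning request_queue's per-policy list of blkgs, which is managed by
 * blkcg core, and blkcg_node puts it on the owning blkio_cgroup's
 * blkg_list.
 */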

typedef void (blkio_init_group_fn)(struct blkio_group *blkg);
typedef void (blkio_update_group_weight_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(struct request_queue *q,
			struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int read_iops);
typedef void (blkio_update_group_write_iops_fn)(struct request_queue *q,
			struct blkio_group *blkg, unsigned int write_iops);

struct blkio_policy_ops {
	blkio_init_group_fn *blkio_init_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};

struct blkio_policy_type {
	struct list_head list;
	struct blkio_policy_ops ops;
	enum blkio_policy_id plid;
	size_t pdata_size;		/* policy specific private data size */
};
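
/*
 * A policy implementation fills in a blkio_policy_type and registers it
 * with blkio_policy_register().  Sketch for a hypothetical policy "foo"
 * (struct foo_group and foo_init_blkio_group are illustrative names):
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_init_group_fn = foo_init_blkio_group,
 *		},
 *		.plid = BLKIO_POLICY_PROP,
 *		.pdata_size = sizeof(struct foo_group),
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);
 *
 * blkcg core then reserves pdata_size bytes of pdata in each blkg for
 * the policy and invokes blkio_init_group_fn on newly created blkgs.
 */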

extern int blkcg_init_queue(struct request_queue *q);
extern void blkcg_drain_queue(struct request_queue *q);
extern void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);
extern void blkg_destroy_all(struct request_queue *q,
			     enum blkio_policy_id plid, bool destroy_root);

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol)
{
	return blkg ? blkg->pd[pol->plid]->pdata : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pdata: policy private data of interest
 * @pol: policy @pdata is for
 *
 * @pdata is policy private data for @pol.  Determine the blkg it's
 * associated with.
 */
static inline struct blkio_group *pdata_to_blkg(void *pdata,
						struct blkio_policy_type *pol)
{
	if (pdata) {
		struct blkg_policy_data *pd =
			container_of(pdata, struct blkg_policy_data, pdata);
		return pd->blkg;
	}
	return NULL;
}
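
/*
 * blkg_to_pdata() and pdata_to_blkg() are inverses: for any blkg which
 * has policy data for @pol,
 *
 *	pdata_to_blkg(blkg_to_pdata(blkg, pol), pol) == blkg
 *
 * Policies typically wrap these to convert between their private
 * per-group structure stored in pdata and the owning blkg.
 */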

static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkio_group *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkio_group *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
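
/*
 * Both functions must be called under queue_lock.  An illustrative
 * caller taking and dropping a temporary extra reference:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);
 *	... use blkg ...
 *	blkg_put(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *
 * When the last reference is put, the blkg is released through
 * __blkg_release().
 */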

#else

struct blkio_group {
};

struct blkio_policy_type {
};

static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }
static inline void blkg_destroy_all(struct request_queue *q,
				    enum blkio_policy_id plid,
				    bool destroy_root) { }

static inline void *blkg_to_pdata(struct blkio_group *blkg,
				  struct blkio_policy_type *pol) { return NULL; }
static inline struct blkio_group *pdata_to_blkg(void *pdata,
						struct blkio_policy_type *pol) { return NULL; }
static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
static inline void blkg_get(struct blkio_group *blkg) { }
static inline void blkg_put(struct blkio_group *blkg) { }

#endif

#define BLKIO_WEIGHT_MIN	10
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
					 struct blkio_policy_type *pol);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				  struct blkio_policy_type *pol,
				  unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
					struct blkio_policy_type *pol);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol);
void blkiocg_set_start_empty_time(struct blkio_group *blkg,
				  struct blkio_policy_type *pol);

#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
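
/*
 * BLKG_FLAG_FNS(waiting) above generates blkio_mark_blkg_waiting(),
 * blkio_clear_blkg_waiting() and blkio_blkg_waiting(), which set,
 * clear and test the BLKG_waiting bit in stats->flags; likewise for
 * "idling" and "empty".  For example:
 *
 *	blkio_mark_blkg_waiting(stats);
 *	if (blkio_blkg_waiting(stats))
 *		blkio_clear_blkg_waiting(stats);
 */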
#else
static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
						       struct blkio_policy_type *pol) { }
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						struct blkio_policy_type *pol, unsigned long dequeue) { }
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
						      struct blkio_policy_type *pol) { }
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
						  struct blkio_policy_type *pol) { }
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
						struct blkio_policy_type *pol) { }
#endif

#ifdef CONFIG_BLK_CGROUP
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid);
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
				       struct request_queue *q,
				       enum blkio_policy_id plid,
				       bool for_root);
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   unsigned long time,
				   unsigned long unaccounted_time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				   struct blkio_policy_type *pol,
				   uint64_t bytes, bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
				     struct blkio_policy_type *pol,
				     uint64_t start_time,
				     uint64_t io_start_time, bool direction,
				     bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				 struct blkio_policy_type *pol,
				 struct blkio_group *curr_blkg, bool direction,
				 bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				    struct blkio_policy_type *pol,
				    bool direction, bool sync);
#else
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
task_blkio_cgroup(struct task_struct *tsk) { return NULL; }

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
					      struct request_queue *q,
					      enum blkio_policy_id plid) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
				struct blkio_policy_type *pol, unsigned long time,
				unsigned long unaccounted_time) { }
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				struct blkio_policy_type *pol, uint64_t bytes,
				bool direction, bool sync) { }
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
				struct blkio_policy_type *pol, uint64_t start_time,
				uint64_t io_start_time, bool direction, bool sync) { }
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
				struct blkio_policy_type *pol, bool direction,
				bool sync) { }
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
				struct blkio_policy_type *pol,
				struct blkio_group *curr_blkg, bool direction,
				bool sync) { }
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
				struct blkio_policy_type *pol, bool direction,
				bool sync) { }
#endif
#endif	/* _BLK_CGROUP_H */