block, cfq: restructure io_cq creation path for io_context interface cleanup
[deliverable/linux.git] / include / linux / iocontext.h
CommitLineData
fd0928df
JA
1#ifndef IOCONTEXT_H
2#define IOCONTEXT_H
3
4ac845a2 4#include <linux/radix-tree.h>
34e6bbf2 5#include <linux/rcupdate.h>
b2efa052 6#include <linux/workqueue.h>
4ac845a2 7
/*
 * Bit numbers for io_cq->changed.  Set by ioc_ioprio_changed() /
 * ioc_cgroup_changed() (declared below) when the owning task's I/O
 * priority or cgroup changes, so the elevator can notice the stale
 * state on a later lookup.
 */
enum {
	ICQ_IOPRIO_CHANGED,
	ICQ_CGROUP_CHANGED,
};
/*
 * An io_cq associates one io_context (ioc) with one request_queue (q).
 * It hangs off both owners: ioc->icq_tree/icq_list on the ioc side and
 * q's icq_list on the queue side.  NOTE(review): presumably one io_cq
 * exists per (ioc, q) pair with outstanding state — confirm against the
 * icq creation path in block/.
 */
struct io_cq {
	struct request_queue *q;	/* owning queue */
	struct io_context *ioc;		/* owning io_context */

	/*
	 * q_node and ioc_node link io_cq through icq_list of q and ioc
	 * respectively.  Both fields are unused once ioc_exit_icq() is
	 * called and shared with __rcu_icq_cache and __rcu_head which are
	 * used for RCU free of io_cq.
	 */
	union {
		struct list_head q_node;
		struct kmem_cache *__rcu_icq_cache;
	};
	union {
		struct hlist_node ioc_node;
		struct rcu_head __rcu_head;
	};

	unsigned long changed;		/* ICQ_*_CHANGED bits, see enum above */
};
/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed.  These could be shared between processes.
 */
struct io_context {
	atomic_long_t refcount;		/* object lifetime; see ioc_task_link() */
	atomic_t nr_tasks;		/* tasks currently attached to this ioc */

	/* all the fields below are protected by this lock */
	spinlock_t lock;

	unsigned short ioprio;		/* current I/O priority */

	/*
	 * For request batching
	 */
	int nr_batch_requests;     /* Number of requests left in the batch */
	unsigned long last_waited; /* Time last woken after wait for request */

	/*
	 * icq bookkeeping.  icq_tree indexes the io_cq's of this ioc
	 * (NOTE(review): presumably keyed by queue — confirm against the
	 * icq lookup path); icq_hint caches a recently used icq under RCU;
	 * icq_list links every icq via io_cq.ioc_node.
	 */
	struct radix_tree_root icq_tree;
	struct io_cq __rcu *icq_hint;
	struct hlist_head icq_list;

	struct work_struct release_work;	/* deferred release of the ioc */
};
d38ecf93
JA
61static inline struct io_context *ioc_task_link(struct io_context *ioc)
62{
63 /*
64 * if ref count is zero, don't allow sharing (ioc is going away, it's
65 * a race).
66 */
d9c7d394 67 if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
cbb4f264 68 atomic_inc(&ioc->nr_tasks);
d38ecf93 69 return ioc;
d237e5c7 70 }
d38ecf93
JA
71
72 return NULL;
73}
74
struct task_struct;
#ifdef CONFIG_BLOCK
/*
 * Drop a reference on @ioc.  NOTE(review): @locked_q appears to name a
 * request_queue the caller already holds locked so the implementation
 * can avoid re-locking it — confirm against blk-ioc.c.
 */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
/* Detach and release @task's io_context on task exit. */
void exit_io_context(struct task_struct *task);
/*
 * Return @task's io_context, creating one (allocated with @gfp_flags on
 * NUMA @node) if it does not exist yet.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
/* Propagate an ioprio / cgroup change to @ioc's icqs via ICQ_*_CHANGED. */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else
struct io_context;
/* !CONFIG_BLOCK: no-op stubs so callers need not ifdef themselves. */
static inline void put_io_context(struct io_context *ioc,
				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
#endif
89
fd0928df 90#endif
This page took 0.69539 seconds and 5 git commands to generate.