4 #include <linux/radix-tree.h>
5 #include <linux/rcupdate.h>
6 #include <linux/workqueue.h>
14 struct request_queue
*q
;
15 struct io_context
*ioc
;
18 * q_node and ioc_node link io_cq through icq_list of q and ioc
19 * respectively. Both fields are unused once ioc_exit_icq() is
20 * called and shared with __rcu_icq_cache and __rcu_head which are
21 * used for RCU free of io_cq.
24 struct list_head q_node
;
25 struct kmem_cache
*__rcu_icq_cache
;
28 struct hlist_node ioc_node
;
29 struct rcu_head __rcu_head
;
32 unsigned long changed
;
36 * I/O subsystem state of the associated processes. It is refcounted
37 * and kmalloc'ed. These could be shared between processes.
40 atomic_long_t refcount
;
43 /* all the fields below are protected by this lock */
46 unsigned short ioprio
;
49 * For request batching
51 int nr_batch_requests
; /* Number of requests left in the batch */
52 unsigned long last_waited
; /* Time last woken after wait for request */
54 struct radix_tree_root icq_tree
;
55 struct io_cq __rcu
*icq_hint
;
56 struct hlist_head icq_list
;
58 struct work_struct release_work
;
61 static inline struct io_context
*ioc_task_link(struct io_context
*ioc
)
64 * if ref count is zero, don't allow sharing (ioc is going away, it's
67 if (ioc
&& atomic_long_inc_not_zero(&ioc
->refcount
)) {
68 atomic_inc(&ioc
->nr_tasks
);
struct task_struct;

#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
void exit_io_context(struct task_struct *task);
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node);
void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
void ioc_cgroup_changed(struct io_context *ioc);
#else /* CONFIG_BLOCK */
/* no-op stubs so callers build when the block layer is configured out */
static inline void put_io_context(struct io_context *ioc,
				  struct request_queue *locked_q) { }
static inline void exit_io_context(struct task_struct *task) { }
/*
 * NOTE(review): the #ifdef/#else/#endif guards and the task_struct
 * forward declaration were dropped by the mangled extraction and are
 * reconstructed here (without them the prototypes above would conflict
 * with these static stubs).  The original #else branch likely stubbed
 * the remaining functions (get_task_io_context, ...) past the end of
 * this chunk -- confirm against the kernel tree.
 */
#endif /* CONFIG_BLOCK */