#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
	struct list_head	list;
	void			*data;
	void			(*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t	lock;
		struct list_head dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	void			*driver_data;

	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;
	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	unsigned int		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};
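
/*
 * A minimal sketch of how a driver might use ->driver_data: init_hctx
 * allocates per-queue state near the hctx's NUMA node and queue_rq reads
 * it back later. "struct my_hctx_data" and my_init_hctx are hypothetical
 * placeholder names, not part of this API:
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int index)
 *	{
 *		hctx->driver_data = kzalloc_node(sizeof(struct my_hctx_data),
 *						 GFP_KERNEL, hctx->numa_node);
 *		return hctx->driver_data ? 0 : -ENOMEM;
 *	}
 */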

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
		unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
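
/*
 * A minimal sketch of an ops table for a hypothetical driver. queue_rq and
 * map_queue are the hooks blk-mq requires; blk_mq_map_queue (declared below)
 * can serve as the stock CPU-to-hardware-queue mapping. The my_* names are
 * placeholders, filled in by the sketches further down:
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *		.init_request	= my_init_request,
 *		.complete	= my_complete_rq,
 *	};
 */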

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,

	BLK_MQ_S_STOPPED	= 0,

	BLK_MQ_MAX_DEPTH	= 2048,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
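
/*
 * A sketch of how a queue_rq implementation reports the status codes above:
 * BUSY asks the block layer to back off and resubmit later, ERROR fails the
 * request outright. my_device_full and my_issue_cmd are placeholders:
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		if (my_device_full(hctx))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		if (my_issue_cmd(hctx, rq))
 *			return BLK_MQ_RQ_QUEUE_ERROR;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */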

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
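
/*
 * A sketch of the expected bring-up order: fill in the tag set, allocate
 * it, then create one or more queues that share it. Teardown runs in
 * reverse, cleaning up the queue(s) before blk_mq_free_tag_set(). The
 * my_mq_ops table and struct my_cmd are the placeholders sketched nearby:
 *
 *	set->ops		= &my_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct my_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(set);
 *	if (!ret) {
 *		q = blk_mq_init_queue(set);
 *		if (IS_ERR(q))
 *			blk_mq_free_tag_set(set);
 *	}
 */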

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
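
/*
 * A sketch of one-off request allocation, e.g. for a driver-internal
 * command; the _reserved variant draws from the reserved_tags pool so it
 * can make progress even when all regular tags are in flight:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);
 *	if (rq) {
 *		...
 *		blk_mq_free_request(rq);
 *	}
 */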

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);

void blk_mq_complete_request(struct request *rq);
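
/*
 * A sketch of the split completion path: the driver's IRQ handler calls
 * blk_mq_complete_request(), which routes the completion back to the
 * submitting CPU and invokes the ->complete hook, which then does the
 * actual end-io work. my_fetch_completed and my_rq_status are placeholders:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct request *rq = my_fetch_completed(data);
 *
 *		blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static void my_complete_rq(struct request *rq)
 *	{
 *		blk_mq_end_io(rq, my_rq_status(rq));
 *	}
 */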

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
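
/*
 * A sketch of driver-side flow control: queue_rq stops the queue when the
 * device is saturated (my_device_full is a placeholder) and the completion
 * path restarts any stopped queues once resources free up:
 *
 *	if (my_device_full(hctx)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *	}
 *
 * and later, from the completion side:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);
 */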

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
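
/*
 * A sketch of typical PDU use given a tag set with
 * cmd_size = sizeof(struct my_cmd): init_request primes the per-request
 * data once at setup time, and the hot path converts in both directions
 * with plain pointer arithmetic. struct my_cmd is a placeholder:
 *
 *	static int my_init_request(void *data, struct request *rq,
 *				   unsigned int hctx_idx, unsigned int rq_idx,
 *				   unsigned int numa_node)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		cmd->rq = rq;
 *		return 0;
 *	}
 */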

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
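
/*
 * A sketch of walking every hardware context of a queue, e.g. from
 * teardown code:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		cancel_delayed_work_sync(&hctx->run_work);
 */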

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
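
/*
 * blk_ctx_sum() evaluates "sum" once per software context with __x bound
 * to that context, so a per-ctx counter can be totalled in one expression.
 * rq_dispatched stands in for whatever counter blk_mq_ctx carries:
 *
 *	unsigned int total = blk_ctx_sum(q, __x->rq_dispatched[0]);
 */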

#endif