/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-cgroup.h"
#include "blk-mq.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

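/*
 * Helpers shared by the attribute handlers below: queue_var_show()
 * formats a single unsigned long for sysfs, and queue_var_store()
 * parses one back in, rejecting anything that is not a base-10 value
 * that fits in an unsigned int.
 */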
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

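/*
 * nr_requests: clamp the new depth to at least BLKDEV_MIN_RQ, then let
 * the legacy request_fn path or blk-mq (whichever drives this queue)
 * perform the actual resize.
 */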
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

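/*
 * read_ahead_kb is stored internally in pages; shifting by
 * (PAGE_CACHE_SHIFT - 10) converts pages to KiB and back.
 */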
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

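/* A sector is 512 bytes, so "sectors >> 1" converts a sector count to KiB. */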
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), page);

	return queue_var_show(PAGE_CACHE_SIZE, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

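/*
 * max_discard_sectors and max_write_same_sectors are kept in 512-byte
 * sectors; "<< 9" converts them to the byte counts exported here.
 */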
static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

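/*
 * max_sectors_kb may be lowered by the user, but never above the
 * hardware limit (max_hw_sectors_kb) or below a single page.
 */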
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

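/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair that exposes one
 * queue flag.  When "neg" is set the value is inverted in both
 * directions, which is how "rotational" presents the opposite of
 * QUEUE_FLAG_NONROT.
 */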
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

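/*
 * nomerges is a three-state knob: 0 allows all merging, 1 sets
 * NOXMERGES (skip the more expensive extended merge lookups) and 2
 * sets NOMERGES (no merging at all).  The show side packs the two
 * flags back into that 0/1/2 encoding.
 */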
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

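/*
 * rq_affinity is likewise three-state: 0 completes a request wherever
 * its interrupt lands, 1 steers completion to the submitting CPU's
 * group (SAME_COMP) and 2 forces it onto the exact submitting CPU
 * (SAME_COMP plus SAME_FORCE).
 */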
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

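/*
 * One queue_sysfs_entry per file under /sys/block/<dev>/queue/, tying
 * a name and mode to the show/store handlers above.
 */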
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

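/* "hw_sector_size" is kept for backwards compatibility; it reports the logical block size. */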
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

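/*
 * kobject show/store entry points: resolve the attribute and owning
 * queue, then run the handler under q->sysfs_lock.  A dying queue is
 * treated as already gone and reports -ENOENT.
 */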
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

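/*
 * The final free of the request_queue is deferred by an RCU grace
 * period (see the call_rcu() in blk_release_queue()) so that code
 * still dereferencing the queue under rcu_read_lock() never touches
 * freed memory.
 */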
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

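/*
 * blk_register_queue - create the "queue" directory under the disk's
 * device kobject and, for request_fn queues, register the elevator's
 * sysfs files as well.
 */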
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		blk_queue_bypass_end(q);
		if (q->mq_ops)
			blk_mq_finish_init(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_disk(disk);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

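/*
 * blk_unregister_queue - undo blk_register_queue(): tear down the
 * elevator files, remove the "queue" kobject and drop the device
 * reference taken at registration time.
 */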
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_disk(disk);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}