/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e && try_loading) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

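/*
 * Usage note (illustrative, not part of the original file): booting with
 *
 *	elevator=deadline
 *
 * stores "deadline" in chosen_elevator above.  elevator_init() then
 * prefers it over CONFIG_DEFAULT_IOSCHED, and
 * load_default_elevator_module() below requests "deadline-iosched" if
 * that scheduler is built as a module.
 */
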
/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
	struct elevator_type *e;

	if (!chosen_elevator[0])
		return;

	spin_lock(&elv_list_lock);
	e = elevator_find(chosen_elevator);
	spin_unlock(&elv_list_lock);

	if (!e)
		request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name, true);
		if (!e)
			return -EINVAL;
	}

	/*
	 * Use the default elevator specified by config boot param or
	 * config option.  Don't try to load modules as we could be running
	 * off async and request_module() isn't allowed from async.
	 */
	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator, false);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop", false);
		}
	}

	q->elevator = elevator_alloc(q, e);
	if (!q->elevator)
		return -ENOMEM;

	err = e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

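/*
 * Usage sketch (illustrative only - the "example" names below are
 * hypothetical): an I/O scheduler typically keeps its pending requests
 * in a private, sector-sorted rb_root and drives it with the helpers
 * above, e.g.:
 *
 *	struct example_data {
 *		struct rb_root sort_list;
 *	};
 *
 *	static void example_add_rq(struct request_queue *q, struct request *rq)
 *	{
 *		struct example_data *ed = q->elevator->elevator_data;
 *
 *		elv_rb_add(&ed->sort_list, rq);
 *	}
 *
 *	static struct request *example_lookup(struct example_data *ed,
 *					      sector_t sector)
 *	{
 *		return elv_rb_find(&ed->sort_list, sector);
 *	}
 *
 * The deadline and CFQ schedulers follow this pattern with their own
 * private data; elv_rb_del() removes a request once it is dispatched.
 */
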
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

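/*
 * Registration sketch (illustrative only - "example" is a hypothetical
 * scheduler, modelled on the in-tree noop elevator): a scheduler module
 * fills in a struct elevator_type and hands it to elv_register() from
 * its module init, undoing it with elv_unregister() on exit:
 *
 *	static struct elevator_type elevator_example = {
 *		.ops = {
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *			.elevator_init_fn	= example_init_queue,
 *			.elevator_exit_fn	= example_exit_queue,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&elevator_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		elv_unregister(&elevator_example);
 *	}
 */
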
void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = -ENOMEM;
	q->elevator = elevator_alloc(q, new_e);
	if (!q->elevator)
		goto fail_init;

	err = new_e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		goto fail_init;
	}

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name), true);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

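/*
 * Usage note (illustrative): elv_iosched_store() above backs the
 * per-queue "scheduler" sysfs attribute, so the active elevator can be
 * switched at runtime from userspace ("sda" is just a placeholder
 * device name):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */
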
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);