block/elevator.c
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000  Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000  Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

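/*
 * Illustrative sketch (not part of this file; the numbers are made up):
 * a request is hashed by the sector just past its end, so a bio that
 * begins exactly where a request ends probes the same bucket and can be
 * found for a back merge in O(1).
 */
#if 0
	/* request covering sectors [1024, 1032) */
	sector_t key = 1024 + 8;		/* rq_hash_key(rq) == 1032 */
	unsigned int bucket = ELV_HASH_FN(key);	/* one of 64 buckets */

	/* a bio with bi_sector == 1032 computes the same bucket */
#endif
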
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
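
/*
 * Worked example (illustrative, numbers made up): for a request
 * covering sectors [100, 108) and an 8-sector bio,
 *   bio->bi_sector == 108  ->  ELEVATOR_BACK_MERGE  (bio appended)
 *   bio->bi_sector ==  92  ->  ELEVATOR_FRONT_MERGE (bio prepended)
 * any other start sector   ->  ELEVATOR_NO_MERGE
 */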

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		snprintf(elv, sizeof(elv), "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
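
/*
 * Note (descriptive, matching the code above): modular schedulers are
 * expected to live in modules named "<name>-iosched", so asking for
 * "deadline" demand-loads deadline-iosched.ko and then retries the
 * lookup under elv_list_lock.
 */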

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
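
/*
 * e.g. booting with "elevator=deadline" on the kernel command line
 * stores "deadline" in chosen_elevator, making it the default for
 * queues initialized afterwards.
 */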

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	void *data;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return 0;
}
EXPORT_SYMBOL(elevator_init);
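
/*
 * Illustrative sketch (assumption: a typical single-queue driver; the
 * "example_*" names are made up, not code from this file):
 * elevator_init() is normally reached from blk_init_queue(), with
 * name == NULL so that chosen_elevator and CONFIG_DEFAULT_IOSCHED
 * pick the scheduler.
 */
#if 0
static int example_probe(void)
{
	struct request_queue *q;

	q = blk_init_queue(example_request_fn, &example_lock);
	if (!q)
		return -ENOMEM;
	/* blk_init_queue() has already called elevator_init(q, NULL) */
	return 0;
}
#endif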

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
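
/*
 * Illustrative sketch (loosely modeled on how deadline-iosched uses
 * these helpers; the "example_*" names are assumptions): an elevator
 * keeps one sorted tree per data direction and lets elv_rb_add()
 * report a sector-key collision so it can dispose of the alias.
 */
#if 0
struct example_data {
	struct rb_root sort_list[2];		/* one tree per direction */
};

static void example_add_rq_rb(struct example_data *ed, struct request *rq)
{
	struct rb_root *root = &ed->sort_list[rq_data_dir(rq)];
	struct request *__alias;

	while (unlikely(__alias = elv_rb_add(root, rq)))
		example_dispatch_request(ed, __alias);	/* resolve collision */
}
#endif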

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
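
/*
 * Worked example (illustrative, numbers made up): with end_sector ==
 * 1000, a dispatch queue holding sectors 1200, 1400, 300, 500 is in
 * order: the current sweep first finishes the sectors at or above the
 * boundary, then wraps to the low ones.  A new request at sector 1300
 * is sorted between 1200 and 1400; one at sector 600 goes after 500.
 */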

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
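
/*
 * e.g. the merge level is selected per queue from user space:
 * "echo 2 > /sys/block/<dev>/queue/nomerges" gives nomerges above,
 * 1 gives noxmerges, and 0 re-enables all merge attempts.
 */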

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_bio_merged_fn)
		e->ops->elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q, false);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q, false);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
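		/* fall through */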
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private[0] = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

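/*
 * Illustrative sketch (assumption: modeled loosely on how existing
 * schedulers declare their sysfs tunables; all "example_*" names are
 * made up): the show/store wrappers above dispatch to elv_fs_entry
 * callbacks like these, serialized by sysfs_lock.
 */
#if 0
static ssize_t example_quantum_show(struct elevator_queue *e, char *page)
{
	struct example_data *ed = e->elevator_data;

	return sprintf(page, "%d\n", ed->quantum);
}

static ssize_t example_quantum_store(struct elevator_queue *e,
				     const char *page, size_t count)
{
	struct example_data *ed = e->elevator_data;

	ed->quantum = simple_strtoul(page, NULL, 10);
	return count;
}

static struct elv_fs_entry example_attrs[] = {
	__ATTR(quantum, S_IRUGO | S_IWUSR, example_quantum_show,
	       example_quantum_store),
	__ATTR_NULL
};
#endif
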
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
	e->registered = 0;
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
EXPORT_SYMBOL(elv_unregister_queue);

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);
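
/*
 * Illustrative sketch (loosely modeled on noop-iosched; the names and
 * the reduced op set are assumptions): the minimum a scheduler module
 * does to appear in /sys/block/<dev>/queue/scheduler.
 */
#if 0
static struct elevator_type elevator_example = {
	.ops = {
		.elevator_dispatch_fn	= example_dispatch,
		.elevator_add_req_fn	= example_add_request,
		.elevator_init_fn	= example_init_queue,
		.elevator_exit_fn	= example_exit_queue,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_init(void)
{
	elv_register(&elevator_example);
	return 0;
}

static void __exit example_exit(void)
{
	elv_unregister(&elevator_example);
}

module_init(example_init);
module_exit(example_exit);
#endif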

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;
	int err;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return -ENOMEM;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	if (old_elevator->registered) {
		__elv_unregister_queue(old_elevator);

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
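
/*
 * e.g. a driver that prefers a specific scheduler can call
 * elevator_change(q, "noop") after queue setup; writes to the sysfs
 * "scheduler" attribute reach the same path via elv_iosched_store()
 * below.
 */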

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}
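
/*
 * Illustrative interaction (assuming a device on this single-queue
 * path):
 *
 *   # cat /sys/block/sda/queue/scheduler
 *   noop deadline [cfq]
 *   # echo deadline > /sys/block/sda/queue/scheduler
 *
 * The read is formatted by elv_iosched_show() above; the write is
 * handled by elv_iosched_store().
 */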

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);