[PATCH] as-iosched: reuse rq for fifo
block/as-iosched.c

/*
 * Anticipatory & deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 *                     Nick Piggin <nickpiggin@yahoo.com.au>
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/interrupt.h>

#define REQ_SYNC	1
#define REQ_ASYNC	0

/*
 * See Documentation/block/as-iosched.txt
 */

/*
 * max time before a read is submitted.
 */
#define default_read_expire (HZ / 8)

/*
 * ditto for writes, these limits are not hard, even
 * if the disk is capable of satisfying them.
 */
#define default_write_expire (HZ / 4)

/*
 * read_batch_expire describes how long we will allow a stream of reads to
 * persist before looking to see whether it is time to switch over to writes.
 */
#define default_read_batch_expire (HZ / 2)

/*
 * write_batch_expire describes how long we want a stream of writes to run for.
 * This is not a hard limit, but a target we set for the auto-tuning thingy.
 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
 * a short amount of time...
 */
#define default_write_batch_expire (HZ / 8)

/*
 * max time we may wait to anticipate a read (default around 6ms)
 */
#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
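/* note: (HZ / 150) evaluates to 0 when HZ < 150; the ?: keeps this at least one jiffy */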
56
57/*
58 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
59 * however huge values tend to interfere and not decay fast enough. A program
60 * might be in a non-io phase of operation. Waiting on user input for example,
61 * or doing a lengthy computation. A small penalty can be justified there, and
62 * will still catch out those processes that constantly have large thinktimes.
63 */
64#define MAX_THINKTIME (HZ/50UL)
65
66/* Bits in as_io_context.state */
67enum as_io_states {
f5b3db00 68 AS_TASK_RUNNING=0, /* Process has not exited */
1da177e4
LT
69 AS_TASK_IOSTARTED, /* Process has started some IO */
70 AS_TASK_IORUNNING, /* Process has completed some IO */
71};
72
73enum anticipation_status {
74 ANTIC_OFF=0, /* Not anticipating (normal operation) */
75 ANTIC_WAIT_REQ, /* The last read has not yet completed */
76 ANTIC_WAIT_NEXT, /* Currently anticipating a request vs
77 last read (which has completed) */
78 ANTIC_FINISHED, /* Anticipating but have found a candidate
79 * or timed out */
80};
81
82struct as_data {
83 /*
84 * run time data
85 */
86
87 struct request_queue *q; /* the "owner" queue */
88
89 /*
90 * requests (as_rq s) are present on both sort_list and fifo_list
91 */
92 struct rb_root sort_list[2];
93 struct list_head fifo_list[2];
94
95 struct as_rq *next_arq[2]; /* next in sort order */
96 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
1da177e4
LT
97
98 unsigned long exit_prob; /* probability a task will exit while
99 being waited on */
	unsigned long exit_no_coop;	/* probability an exited task will
					   not be part of a later cooperating
					   request */
	unsigned long new_ttime_total;	/* mean thinktime on new proc */
	unsigned long new_ttime_mean;
	u64 new_seek_total;		/* mean seek on new proc */
	sector_t new_seek_mean;

	unsigned long current_batch_expires;
	unsigned long last_check_fifo[2];
	int changed_batch;		/* 1: waiting for old batch to end */
	int new_batch;			/* 1: waiting on first read complete */
	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
	int write_batch_count;		/* max # of reqs in a write batch */
	int current_write_count;	/* how many requests left this batch */
	int write_batch_idled;		/* has the write batch gone idle? */
	mempool_t *arq_pool;

	enum anticipation_status antic_status;
	unsigned long antic_start;	/* jiffies: when it started */
	struct timer_list antic_timer;	/* anticipatory scheduling timer */
	struct work_struct antic_work;	/* Deferred unplugging */
	struct io_context *io_context;	/* Identify the expected process */
	int ioc_finished;	/* IO associated with io_context is finished */
	int nr_dispatched;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	unsigned long fifo_expire[2];
	unsigned long batch_expire[2];
	unsigned long antic_expire;
};

/*
 * per-request data.
 */
enum arq_state {
	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
				   scheduler */
	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
				   driver now */
	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
	AS_RQ_REMOVED,
	AS_RQ_MERGED,
	AS_RQ_POSTSCHED,	/* when they shouldn't be */
};

struct as_rq {
	struct request *request;

	struct io_context *io_context;	/* The submitting task */

	unsigned int is_sync;
	enum arq_state state;
};

#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)

static kmem_cache_t *arq_pool;

static atomic_t ioc_count = ATOMIC_INIT(0);
static struct completion *ioc_gone;

static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
static void as_antic_stop(struct as_data *ad);

/*
 * IO Context helper functions
 */

/* Called to deallocate the as_io_context */
static void free_as_io_context(struct as_io_context *aic)
{
	kfree(aic);
	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
		complete(ioc_gone);
}

static void as_trim(struct io_context *ioc)
{
	if (ioc->aic)
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
}

/* Called when the task exits */
static void exit_as_io_context(struct as_io_context *aic)
{
	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
	clear_bit(AS_TASK_RUNNING, &aic->state);
}

static struct as_io_context *alloc_as_io_context(void)
{
	struct as_io_context *ret;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (ret) {
		ret->dtor = free_as_io_context;
		ret->exit = exit_as_io_context;
		ret->state = 1 << AS_TASK_RUNNING;
		atomic_set(&ret->nr_queued, 0);
		atomic_set(&ret->nr_dispatched, 0);
		spin_lock_init(&ret->lock);
		ret->ttime_total = 0;
		ret->ttime_samples = 0;
		ret->ttime_mean = 0;
		ret->seek_total = 0;
		ret->seek_samples = 0;
		ret->seek_mean = 0;
		atomic_inc(&ioc_count);
	}

	return ret;
}

/*
 * If the current task has no AS IO context then create one and initialise it.
 * Then take a ref on the task's io context and return it.
 */
static struct io_context *as_get_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC);
	if (ioc && !ioc->aic) {
		ioc->aic = alloc_as_io_context();
		if (!ioc->aic) {
			put_io_context(ioc);
			ioc = NULL;
		}
	}
	return ioc;
}

static void as_put_io_context(struct as_rq *arq)
{
	struct as_io_context *aic;

	if (unlikely(!arq->io_context))
		return;

	aic = arq->io_context->aic;

	if (arq->is_sync == REQ_SYNC && aic) {
		spin_lock(&aic->lock);
		set_bit(AS_TASK_IORUNNING, &aic->state);
		aic->last_end_request = jiffies;
		spin_unlock(&aic->lock);
	}

	put_io_context(arq->io_context);
}

/*
 * rb tree support functions
 */
#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])

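/*
 * elv_rb_add() hands back an already-queued request (an "alias") if one
 * exists at the same sort key; AS cannot keep two requests with the same
 * key in the tree, so any alias is moved straight to the dispatch list
 * and anticipation is stopped.
 */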
static void as_add_arq_rb(struct as_data *ad, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct request *alias;

	while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) {
		as_move_to_dispatch(ad, RQ_DATA(alias));
		as_antic_stop(ad);
	}
}

static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
{
	elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq);
}

/*
 * IO Scheduler proper
 */

#define MAXBACK (1024 * 1024)	/*
				 * Maximum distance the disk will go backward
				 * for a request.
				 */

#define BACK_PENALTY	2

/*
 * as_choose_req selects the preferred one of two requests of the same data_dir
 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
 */
static struct as_rq *
as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
{
	int data_dir;
	sector_t last, s1, s2, d1, d2;
	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
	const sector_t maxback = MAXBACK;

	if (arq1 == NULL || arq1 == arq2)
		return arq2;
	if (arq2 == NULL)
		return arq1;

	data_dir = arq1->is_sync;

	last = ad->last_sector[data_dir];
	s1 = arq1->request->sector;
	s2 = arq2->request->sector;

	BUG_ON(data_dir != arq2->is_sync);

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1+maxback >= last)
		d1 = (last - s1)*BACK_PENALTY;
	else {
		r1_wrap = 1;
		d1 = 0; /* shut up, gcc */
	}

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2+maxback >= last)
		d2 = (last - s2)*BACK_PENALTY;
	else {
		r2_wrap = 1;
		d2 = 0;
	}

	/* Found required data */
	if (!r1_wrap && r2_wrap)
		return arq1;
	else if (!r2_wrap && r1_wrap)
		return arq2;
	else if (r1_wrap && r2_wrap) {
		/* both behind the head */
		if (s1 <= s2)
			return arq1;
		else
			return arq2;
	}

	/* Both requests in front of the head */
	if (d1 < d2)
		return arq1;
	else if (d2 < d1)
		return arq2;
	else {
		if (s1 >= s2)
			return arq1;
		else
			return arq2;
	}
}

/*
 * as_find_next_arq finds the next request after @prev in elevator order.
 * This, together with as_choose_req, forms the basis for how the scheduler
 * chooses what request to process next. Anticipation works on top of this.
 */
static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
{
	struct request *last = arq->request;
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct as_rq *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = RQ_DATA(rb_entry_rq(rbprev));

	if (rbnext)
		next = RQ_DATA(rb_entry_rq(rbnext));
	else {
		const int data_dir = arq->is_sync;

		rbnext = rb_first(&ad->sort_list[data_dir]);
		if (rbnext && rbnext != &last->rb_node)
			next = RQ_DATA(rb_entry_rq(rbnext));
	}

	return as_choose_req(ad, next, prev);
}

/*
 * anticipatory scheduling functions follow
 */

/*
 * as_antic_expired tells us when we have anticipated too long.
 * The funny "absolute difference" math on the elapsed time is to handle
 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
 */
static int as_antic_expired(struct as_data *ad)
{
	long delta_jif;

	delta_jif = jiffies - ad->antic_start;
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->antic_expire)
		return 0;

	return 1;
}

/*
 * as_antic_waitnext starts anticipating that a nice request will soon be
 * submitted. See also as_antic_waitreq
 */
static void as_antic_waitnext(struct as_data *ad)
{
	unsigned long timeout;

	BUG_ON(ad->antic_status != ANTIC_OFF
			&& ad->antic_status != ANTIC_WAIT_REQ);

	timeout = ad->antic_start + ad->antic_expire;

	mod_timer(&ad->antic_timer, timeout);

	ad->antic_status = ANTIC_WAIT_NEXT;
}

/*
 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
 * until the request that we're anticipating on has finished. This means we
 * are timing from when the candidate process wakes up hopefully.
 */
static void as_antic_waitreq(struct as_data *ad)
{
	BUG_ON(ad->antic_status == ANTIC_FINISHED);
	if (ad->antic_status == ANTIC_OFF) {
		if (!ad->io_context || ad->ioc_finished)
			as_antic_waitnext(ad);
		else
			ad->antic_status = ANTIC_WAIT_REQ;
	}
}

/*
 * This is called directly by the functions in this file to stop anticipation.
 * We kill the timer and schedule a call to the request_fn asap.
 */
static void as_antic_stop(struct as_data *ad)
{
	int status = ad->antic_status;

	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
		if (status == ANTIC_WAIT_NEXT)
			del_timer(&ad->antic_timer);
		ad->antic_status = ANTIC_FINISHED;
		/* see as_work_handler */
		kblockd_schedule_work(&ad->antic_work);
	}
}

/*
 * as_antic_timeout is the timer function set by as_antic_waitnext.
 */
static void as_antic_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	struct as_data *ad = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;

		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);

		if (aic->ttime_samples == 0) {
			/* process anticipated on has exited or timed out */
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
		}
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
}

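/*
 * The thinktime and seek statistics below are kept as decaying averages in
 * fixed point, with 256 representing 1.0: each new sample is folded in with
 * a weight of 1/8, so a mean is roughly (7*mean + 256*sample) / 8.
 */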
static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
				unsigned long ttime)
{
	/* fixed point: 1.0 == 1<<8 */
	if (aic->ttime_samples == 0) {
		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
		ad->new_ttime_mean = ad->new_ttime_total / 256;

		ad->exit_prob = (7*ad->exit_prob)/8;
	}
	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
}

static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
				sector_t sdist)
{
	u64 total;

	if (aic->seek_samples == 0) {
		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
		ad->new_seek_mean = ad->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (aic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);

	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
	total = aic->seek_total + (aic->seek_samples/2);
	do_div(total, aic->seek_samples);
	aic->seek_mean = (sector_t)total;
}

/*
 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
 * updates @aic->ttime_mean based on that. It is called when a new
 * request is queued.
 */
static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
				struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir = arq->is_sync;
	unsigned long thinktime = 0;
	sector_t seek_dist;

	if (aic == NULL)
		return;

	if (data_dir == REQ_SYNC) {
		unsigned long in_flight = atomic_read(&aic->nr_queued)
					+ atomic_read(&aic->nr_dispatched);
		spin_lock(&aic->lock);
		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
			/* Calculate read -> read thinktime */
			if (test_bit(AS_TASK_IORUNNING, &aic->state)
							&& in_flight == 0) {
				thinktime = jiffies - aic->last_end_request;
				thinktime = min(thinktime, MAX_THINKTIME-1);
			}
			as_update_thinktime(ad, aic, thinktime);

			/* Calculate read -> read seek distance */
			if (aic->last_request_pos < rq->sector)
				seek_dist = rq->sector - aic->last_request_pos;
			else
				seek_dist = aic->last_request_pos - rq->sector;
			as_update_seekdist(ad, aic, seek_dist);
		}
		aic->last_request_pos = rq->sector + rq->nr_sectors;
		set_bit(AS_TASK_IOSTARTED, &aic->state);
		spin_unlock(&aic->lock);
	}
}

/*
 * as_close_req decides if one request is considered "close" to the
 * previous one issued.
 */
static int as_close_req(struct as_data *ad, struct as_io_context *aic,
			struct as_rq *arq)
{
	unsigned long delay;	/* milliseconds */
	sector_t last = ad->last_sector[ad->batch_data_dir];
	sector_t next = arq->request->sector;
	sector_t delta;	/* acceptable close offset (in sectors) */
	sector_t s;

	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
		delay = 0;
	else
		delay = ((jiffies - ad->antic_start) * 1000) / HZ;

	if (delay == 0)
		delta = 8192;
	else if (delay <= 20 && delay <= ad->antic_expire)
		delta = 8192 << delay;
	else
		return 1;

	if ((last <= next + (delta>>1)) && (next <= last + delta))
		return 1;

	if (last < next)
		s = next - last;
	else
		s = last - next;

	if (aic->seek_samples == 0) {
		/*
		 * Process has just started IO. Use past statistics to
		 * gauge success possibility
		 */
		if (ad->new_seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}

	} else {
		if (aic->seek_mean > s) {
			/* this request is better than what we're expecting */
			return 1;
		}
	}

	return 0;
}

/*
 * as_can_break_anticipation returns true if we have been anticipating this
 * request.
 *
 * It also returns true if the process against which we are anticipating
 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
 * dispatch it ASAP, because we know that application will not be submitting
 * any new reads.
 *
 * If the task which has submitted the request has exited, break anticipation.
 *
 * If this task has queued some other IO, do not enter anticipation.
 */
static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
{
	struct io_context *ioc;
	struct as_io_context *aic;

	ioc = ad->io_context;
	BUG_ON(!ioc);

	if (arq && ioc == arq->io_context) {
		/* request from same process */
		return 1;
	}

	if (ad->ioc_finished && as_antic_expired(ad)) {
		/*
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 */
		return 1;
	}

	aic = ioc->aic;
	if (!aic)
		return 0;

	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		return 1;
	}

	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		return 1;
	}

	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, aic, arq)) {
		/*
		 * Found a close request that is not one of ours.
		 *
		 * This makes close requests from another process update
		 * our IO history. It is generally useful when there are
		 * two or more cooperating processes working in the same
		 * area.
		 */
		if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
			if (aic->ttime_samples == 0)
				ad->exit_prob = (7*ad->exit_prob + 256)/8;

			ad->exit_no_coop = (7*ad->exit_no_coop)/8;
		}

		as_update_iohist(ad, aic, arq->request);
		return 1;
	}

	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
		/* process anticipated on has exited */
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;

		if (ad->exit_no_coop > 128)
			return 1;
	}

	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
			return 1;
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		return 1;
	}

	return 0;
}

/*
 * as_can_anticipate indicates whether we should either run arq
 * or keep anticipating a better request.
 */
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
{
	if (!ad->io_context)
		/*
		 * Last request submitted was a write
		 */
		return 0;

	if (ad->antic_status == ANTIC_FINISHED)
		/*
		 * Don't restart if we have just finished. Run the next request
		 */
		return 0;

	if (as_can_break_anticipation(ad, arq))
		/*
		 * This request is a good candidate. Don't keep anticipating,
		 * run it.
		 */
		return 0;

	/*
	 * OK from here, we haven't finished, and don't have a decent request!
	 * Status is either ANTIC_OFF so start waiting,
	 * ANTIC_WAIT_REQ so continue waiting for request to finish
	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
	 */

	return 1;
}

/*
 * as_update_arq must be called whenever a request (arq) is added to
 * the sort_list. This function keeps caches up to date, and checks if the
 * request might be one we are "anticipating"
 */
static void as_update_arq(struct as_data *ad, struct as_rq *arq)
{
	const int data_dir = arq->is_sync;

	/* keep the next_arq cache up to date */
	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);

	/*
	 * have we been anticipating this request?
	 * or does it come from the same process as the one we are anticipating
	 * for?
	 */
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		if (as_can_break_anticipation(ad, arq))
			as_antic_stop(ad);
	}
}

/*
 * Gathers timings and resizes the write batch automatically
 */
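/*
 * Roughly: if the last write batch ran well past its target duration without
 * idling, shrink write_batch_count (the number of requests allowed per write
 * batch); if it used all its requests in well under the target time, grow it.
 */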
static void update_write_batch(struct as_data *ad)
{
	unsigned long batch = ad->batch_expire[REQ_ASYNC];
	long write_time;

	write_time = (jiffies - ad->current_batch_expires) + batch;
	if (write_time < 0)
		write_time = 0;

	if (write_time > batch && !ad->write_batch_idled) {
		if (write_time > batch * 3)
			ad->write_batch_count /= 2;
		else
			ad->write_batch_count--;
	} else if (write_time < batch && ad->current_write_count == 0) {
		if (batch > write_time * 3)
			ad->write_batch_count *= 2;
		else
			ad->write_batch_count++;
	}

	if (ad->write_batch_count < 1)
		ad->write_batch_count = 1;
}

/*
 * as_completed_request is to be called when a request has completed and
 * returned something to the requesting process, be it an error or data.
 */
static void as_completed_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(!list_empty(&rq->queuelist));

	if (arq->state != AS_RQ_REMOVED) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
		goto out;
	}

	if (ad->changed_batch && ad->nr_dispatched == 1) {
		kblockd_schedule_work(&ad->antic_work);
		ad->changed_batch = 0;

		if (ad->batch_data_dir == REQ_SYNC)
			ad->new_batch = 1;
	}
	WARN_ON(ad->nr_dispatched == 0);
	ad->nr_dispatched--;

	/*
	 * Start counting the batch from when a request of that direction is
	 * actually serviced. This should help devices with big TCQ windows
	 * and writeback caches
	 */
	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
		update_write_batch(ad);
		ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_SYNC];
		ad->new_batch = 0;
	}

	if (ad->io_context == arq->io_context && ad->io_context) {
		ad->antic_start = jiffies;
		ad->ioc_finished = 1;
		if (ad->antic_status == ANTIC_WAIT_REQ) {
			/*
			 * We were waiting on this request, now anticipate
			 * the next one
			 */
			as_antic_waitnext(ad);
		}
	}

	as_put_io_context(arq);
out:
	arq->state = AS_RQ_POSTSCHED;
}

/*
 * as_remove_queued_request removes a request from the pre dispatch queue
 * without updating refcounts. It is expected the caller will drop the
 * reference unless it replaces the request at some part of the elevator
 * (ie. the dispatch queue)
 */
static void as_remove_queued_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	const int data_dir = arq->is_sync;
	struct as_data *ad = q->elevator->elevator_data;

	WARN_ON(arq->state != AS_RQ_QUEUED);

	if (arq->io_context && arq->io_context->aic) {
		BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
		atomic_dec(&arq->io_context->aic->nr_queued);
	}

	/*
	 * Update the "next_arq" cache if we are about to remove its
	 * entry
	 */
	if (ad->next_arq[data_dir] == arq)
		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	rq_fifo_clear(rq);
	as_del_arq_rb(ad, rq);
}

/*
 * as_fifo_expired returns 0 if there are no expired reads on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval. Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct request *rq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	rq = rq_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, rq_fifo_time(rq));
}

/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}

/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	const int data_dir = arq->is_sync;

	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_arq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &arq->io_context);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	as_remove_queued_request(ad->q, rq);
	WARN_ON(arq->state != AS_RQ_QUEUED);

	elv_dispatch_sort(ad->q, rq);

	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
	ad->nr_dispatched++;
}

/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
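/*
 * In rough priority order: a forced dispatch drains everything; otherwise the
 * current batch keeps running in its direction (possibly anticipating for
 * reads) until it expires, then the scheduler switches direction, and a
 * request whose fifo deadline has passed is picked ahead of the sort order.
 */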
static int as_dispatch_request(request_queue_t *q, int force)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);

	if (unlikely(force)) {
		/*
		 * Forced dispatch, accounting is useless.  Reset
		 * accounting states and dump fifo_lists.  Note that
		 * batch_data_dir is reset to REQ_SYNC to avoid
		 * screwing write batch accounting as write batch
		 * accounting occurs on W->R transition.
		 */
		int dispatched = 0;

		ad->batch_data_dir = REQ_SYNC;
		ad->changed_batch = 0;
		ad->new_batch = 0;

		while (ad->next_arq[REQ_SYNC]) {
			as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_SYNC] = jiffies;

		while (ad->next_arq[REQ_ASYNC]) {
			as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
			dispatched++;
		}
		ad->last_check_fifo[REQ_ASYNC] = jiffies;

		return dispatched;
	}

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		arq = ad->next_arq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, arq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (arq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[REQ_SYNC].next));
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		arq = ad->next_arq[ad->batch_data_dir];
		goto dispatch_request;
	}

	BUG();
	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		arq = RQ_DATA(rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next));
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
				ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * arq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, arq);

	return 1;
}

/*
 * add arq to rbtree and fifo
 */
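/*
 * Reads, plus any writes flagged REQ_RW_SYNC, are classified as REQ_SYNC
 * below; only REQ_SYNC requests are anticipated on, while async writes are
 * dispatched in batches.
 */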
static void as_add_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);
	int data_dir;

	arq->state = AS_RQ_NEW;

	if (rq_data_dir(arq->request) == READ
			|| (arq->request->cmd_flags & REQ_RW_SYNC))
		arq->is_sync = 1;
	else
		arq->is_sync = 0;
	data_dir = arq->is_sync;

	arq->io_context = as_get_io_context();

	if (arq->io_context) {
		as_update_iohist(ad, arq->io_context->aic, arq->request);
		atomic_inc(&arq->io_context->aic->nr_queued);
	}

	as_add_arq_rb(ad, rq);

	/*
	 * set expire time (only used for reads) and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);

	as_update_arq(ad, arq); /* keep state machine up to date */
	arq->state = AS_RQ_QUEUED;
}

static void as_activate_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(arq->state != AS_RQ_DISPATCHED);
	arq->state = AS_RQ_REMOVED;
	if (arq->io_context && arq->io_context->aic)
		atomic_dec(&arq->io_context->aic->nr_dispatched);
}

static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	WARN_ON(arq->state != AS_RQ_REMOVED);
	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
}

/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(request_queue_t *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	return list_empty(&ad->fifo_list[REQ_ASYNC])
		&& list_empty(&ad->fifo_list[REQ_SYNC]);
}

static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;

	/*
	 * check for front merge
	 */
	__rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void as_merged_request(request_queue_t *q, struct request *req, int type)
{
	struct as_data *ad = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		as_del_arq_rb(ad, req);
		as_add_arq_rb(ad, req);
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}
}

static void as_merged_requests(request_queue_t *q, struct request *req,
				struct request *next)
{
	struct as_rq *arq = RQ_DATA(req);
	struct as_rq *anext = RQ_DATA(next);

	BUG_ON(!arq);
	BUG_ON(!anext);

	/*
	 * if anext expires before arq, assign its expire time to arq
	 * and move into anext position (anext will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
			/*
			 * Don't copy here but swap, because when anext is
			 * removed below, it must contain the unused context
			 */
			swap_io_context(&arq->io_context, &anext->io_context);
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	as_put_io_context(anext);

	anext->state = AS_RQ_MERGED;
}

/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(void *data)
{
	struct request_queue *q = data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!as_queue_empty(q))
		q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_put_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	if (!arq) {
		WARN_ON(1);
		return;
	}

	if (unlikely(arq->state != AS_RQ_POSTSCHED &&
		     arq->state != AS_RQ_PRESCHED &&
		     arq->state != AS_RQ_MERGED)) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
	}

	mempool_free(arq, ad->arq_pool);
	rq->elevator_private = NULL;
}

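/*
 * Allocate the per-request as_rq from the mempool and attach it via
 * rq->elevator_private; as_put_request() above returns it to the pool.
 */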
static int as_set_request(request_queue_t *q, struct request *rq,
			  struct bio *bio, gfp_t gfp_mask)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);

	if (arq) {
		memset(arq, 0, sizeof(*arq));
		arq->request = rq;
		arq->state = AS_RQ_PRESCHED;
		arq->io_context = NULL;
		rq->elevator_private = arq;
		return 0;
	}

	return 1;
}

static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;
	if (ad->antic_status == ANTIC_WAIT_REQ ||
	    ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context();
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}

static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);
	kblockd_flush();

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	mempool_destroy(ad->arq_pool);
	put_io_context(ad->io_context);
	kfree(ad);
}

/*
 * initialize elevator private data (as_data), and alloc an arq for
 * each request on the free lists
 */
static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
	struct as_data *ad;

	if (!arq_pool)
		return NULL;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	if (!ad)
		return NULL;
	memset(ad, 0, sizeof(*ad));

	ad->q = q; /* Identify what queue the data belongs to */

	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, arq_pool, q->node);
	if (!ad->arq_pool) {
		kfree(ad);
		return NULL;
	}

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler, q);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return ad;
}

/*
 * sysfs parts below
 */

static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t est_time_show(elevator_t *e, char *page)
{
	struct as_data *ad = e->elevator_data;
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n",
				100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu %% probability of exiting without a "
				"cooperating process submitting IO\n",
				100*ad->exit_no_coop/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
				(unsigned long long)ad->new_seek_mean);

	return pos;
}

#define SHOW_FUNCTION(__FUNC, __VAR)				\
static ssize_t __FUNC(elevator_t *e, char *page)		\
{								\
	struct as_data *ad = e->elevator_data;			\
	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
}
SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct as_data *ad = e->elevator_data;				\
	int ret = as_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
	return ret;							\
}
STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batch_expire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batch_expire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION

#define AS_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)

static struct elv_fs_entry as_attrs[] = {
	__ATTR_RO(est_time),
	AS_ATTR(read_expire),
	AS_ATTR(write_expire),
	AS_ATTR(antic_expire),
	AS_ATTR(read_batch_expire),
	AS_ATTR(write_batch_expire),
	__ATTR_NULL
};

static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn = as_merge,
		.elevator_merged_fn = as_merged_request,
		.elevator_merge_req_fn = as_merged_requests,
		.elevator_dispatch_fn = as_dispatch_request,
		.elevator_add_req_fn = as_add_request,
		.elevator_activate_req_fn = as_activate_request,
		.elevator_deactivate_req_fn = as_deactivate_request,
		.elevator_queue_empty_fn = as_queue_empty,
		.elevator_completed_req_fn = as_completed_request,
		.elevator_former_req_fn = elv_rb_former_request,
		.elevator_latter_req_fn = elv_rb_latter_request,
		.elevator_set_req_fn = as_set_request,
		.elevator_put_req_fn = as_put_request,
		.elevator_may_queue_fn = as_may_queue,
		.elevator_init_fn = as_init_queue,
		.elevator_exit_fn = as_exit_queue,
		.trim = as_trim,
	},

	.elevator_attrs = as_attrs,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};

static int __init as_init(void)
{
	int ret;

	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
				     0, 0, NULL, NULL);
	if (!arq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_as);
	if (!ret) {
		/*
		 * don't allow AS to get unregistered, since we would have
		 * to browse all tasks in the system and release their
		 * as_io_context first
		 */
		__module_get(THIS_MODULE);
		return 0;
	}

	kmem_cache_destroy(arq_pool);
	return ret;
}

static void __exit as_exit(void)
{
	DECLARE_COMPLETION(all_gone);
	elv_unregister(&iosched_as);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (atomic_read(&ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	kmem_cache_destroy(arq_pool);
}

module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");