blk-cgroup: be able to remove the record of unplugged device
1 /*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July 2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
9 */
10
11 /*
12 * This handles all read/write requests to block devices
13 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/backing-dev.h>
17 #include <linux/bio.h>
18 #include <linux/blkdev.h>
19 #include <linux/highmem.h>
20 #include <linux/mm.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/string.h>
23 #include <linux/init.h>
24 #include <linux/completion.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
28 #include <linux/task_io_accounting_ops.h>
29 #include <linux/fault-inject.h>
30 #include <linux/list_sort.h>
31
32 #define CREATE_TRACE_POINTS
33 #include <trace/events/block.h>
34
35 #include "blk.h"
36
37 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
39 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
40
41 static int __make_request(struct request_queue *q, struct bio *bio);
42
43 /*
44 * For the allocated request tables
45 */
46 static struct kmem_cache *request_cachep;
47
48 /*
49 * For queue allocation
50 */
51 struct kmem_cache *blk_requestq_cachep;
52
53 /*
54 * Controlling structure to kblockd
55 */
56 static struct workqueue_struct *kblockd_workqueue;
57
58 static void drive_stat_acct(struct request *rq, int new_io)
59 {
60 struct hd_struct *part;
61 int rw = rq_data_dir(rq);
62 int cpu;
63
64 if (!blk_do_io_stat(rq))
65 return;
66
67 cpu = part_stat_lock();
68
69 if (!new_io) {
70 part = rq->part;
71 part_stat_inc(cpu, part, merges[rw]);
72 } else {
73 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
74 if (!hd_struct_try_get(part)) {
75 /*
76 * The partition is already being removed,
77 * the request will be accounted on the disk only
78 *
79 * We take a reference on disk->part0 although that
80 * partition will never be deleted, so we can treat
81 * it as any other partition.
82 */
83 part = &rq->rq_disk->part0;
84 hd_struct_get(part);
85 }
86 part_round_stats(cpu, part);
87 part_inc_in_flight(part, rw);
88 rq->part = part;
89 }
90
91 part_stat_unlock();
92 }
93
94 void blk_queue_congestion_threshold(struct request_queue *q)
95 {
96 int nr;
97
98 nr = q->nr_requests - (q->nr_requests / 8) + 1;
99 if (nr > q->nr_requests)
100 nr = q->nr_requests;
101 q->nr_congestion_on = nr;
102
103 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
104 if (nr < 1)
105 nr = 1;
106 q->nr_congestion_off = nr;
107 }
108
109 /**
110 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
111 * @bdev: device
112 *
113 * Locates the passed device's request queue and returns the address of its
114 * backing_dev_info
115 *
116 * Will return NULL if the request queue cannot be located.
117 */
118 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
119 {
120 struct backing_dev_info *ret = NULL;
121 struct request_queue *q = bdev_get_queue(bdev);
122
123 if (q)
124 ret = &q->backing_dev_info;
125 return ret;
126 }
127 EXPORT_SYMBOL(blk_get_backing_dev_info);
128
129 void blk_rq_init(struct request_queue *q, struct request *rq)
130 {
131 memset(rq, 0, sizeof(*rq));
132
133 INIT_LIST_HEAD(&rq->queuelist);
134 INIT_LIST_HEAD(&rq->timeout_list);
135 rq->cpu = -1;
136 rq->q = q;
137 rq->__sector = (sector_t) -1;
138 INIT_HLIST_NODE(&rq->hash);
139 RB_CLEAR_NODE(&rq->rb_node);
140 rq->cmd = rq->__cmd;
141 rq->cmd_len = BLK_MAX_CDB;
142 rq->tag = -1;
143 rq->ref_count = 1;
144 rq->start_time = jiffies;
145 set_start_time_ns(rq);
146 rq->part = NULL;
147 }
148 EXPORT_SYMBOL(blk_rq_init);
149
150 static void req_bio_endio(struct request *rq, struct bio *bio,
151 unsigned int nbytes, int error)
152 {
153 if (error)
154 clear_bit(BIO_UPTODATE, &bio->bi_flags);
155 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
156 error = -EIO;
157
158 if (unlikely(nbytes > bio->bi_size)) {
159 printk(KERN_ERR "%s: want %u bytes done, %u left\n",
160 __func__, nbytes, bio->bi_size);
161 nbytes = bio->bi_size;
162 }
163
164 if (unlikely(rq->cmd_flags & REQ_QUIET))
165 set_bit(BIO_QUIET, &bio->bi_flags);
166
167 bio->bi_size -= nbytes;
168 bio->bi_sector += (nbytes >> 9);
169
170 if (bio_integrity(bio))
171 bio_integrity_advance(bio, nbytes);
172
173 /* don't actually finish bio if it's part of flush sequence */
174 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
175 bio_endio(bio, error);
176 }
177
178 void blk_dump_rq_flags(struct request *rq, char *msg)
179 {
180 int bit;
181
182 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
183 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
184 rq->cmd_flags);
185
186 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
187 (unsigned long long)blk_rq_pos(rq),
188 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
189 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
190 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
191
192 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
193 printk(KERN_INFO " cdb: ");
194 for (bit = 0; bit < BLK_MAX_CDB; bit++)
195 printk("%02x ", rq->cmd[bit]);
196 printk("\n");
197 }
198 }
199 EXPORT_SYMBOL(blk_dump_rq_flags);
200
201 static void blk_delay_work(struct work_struct *work)
202 {
203 struct request_queue *q;
204
205 q = container_of(work, struct request_queue, delay_work.work);
206 spin_lock_irq(q->queue_lock);
207 __blk_run_queue(q);
208 spin_unlock_irq(q->queue_lock);
209 }
210
211 /**
212 * blk_delay_queue - restart queueing after defined interval
213 * @q: The &struct request_queue in question
214 * @msecs: Delay in msecs
215 *
216 * Description:
217 * Sometimes queueing needs to be postponed for a little while, to allow
218 * resources to come back. This function will make sure that queueing is
219 * restarted around the specified time.
220 */
221 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
222 {
223 queue_delayed_work(kblockd_workqueue, &q->delay_work,
224 msecs_to_jiffies(msecs));
225 }
226 EXPORT_SYMBOL(blk_delay_queue);
227
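/*
 * Illustrative sketch (not part of this file): a driver's request_fn using
 * blk_delay_queue() when it temporarily runs out of resources, as described
 * above. The my_dev_has_free_slot()/my_dev_issue() helpers are hypothetical.
 */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (!my_dev_has_free_slot(q->queuedata)) {
                        /* Park the request and ask for a retry in 3 msecs. */
                        blk_requeue_request(q, rq);
                        blk_delay_queue(q, 3);
                        return;
                }
                my_dev_issue(q->queuedata, rq);
        }
}
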
228 /**
229 * blk_start_queue - restart a previously stopped queue
230 * @q: The &struct request_queue in question
231 *
232 * Description:
233 * blk_start_queue() will clear the stop flag on the queue, and call
234 * the request_fn for the queue if it was in a stopped state when
235 * entered. Also see blk_stop_queue(). Queue lock must be held.
236 **/
237 void blk_start_queue(struct request_queue *q)
238 {
239 WARN_ON(!irqs_disabled());
240
241 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
242 __blk_run_queue(q);
243 }
244 EXPORT_SYMBOL(blk_start_queue);
245
246 /**
247 * blk_stop_queue - stop a queue
248 * @q: The &struct request_queue in question
249 *
250 * Description:
251 * The Linux block layer assumes that a block driver will consume all
252 * entries on the request queue when the request_fn strategy is called.
253 * Often this will not happen, because of hardware limitations (queue
254 * depth settings). If a device driver gets a 'queue full' response,
255 * or if it simply chooses not to queue more I/O at one point, it can
256 * call this function to prevent the request_fn from being called until
257 * the driver has signalled it's ready to go again. This happens by calling
258 * blk_start_queue() to restart queue operations. Queue lock must be held.
259 **/
260 void blk_stop_queue(struct request_queue *q)
261 {
262 __cancel_delayed_work(&q->delay_work);
263 queue_flag_set(QUEUE_FLAG_STOPPED, q);
264 }
265 EXPORT_SYMBOL(blk_stop_queue);
266
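/*
 * Illustrative sketch: pausing dispatch on a 'queue full' condition and
 * resuming it from the completion path, per the description above. Both
 * helpers assume the queue lock handling noted in their comments; struct
 * my_dev and its fields are hypothetical.
 */
static void example_queue_full(struct request_queue *q, struct request *rq)
{
        /* Called from request_fn with q->queue_lock held. */
        blk_requeue_request(q, rq);
        blk_stop_queue(q);
}

static void example_completion_irq(struct my_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(dev->queue->queue_lock, flags);
        /* A slot freed up: let the block layer call request_fn again. */
        blk_start_queue(dev->queue);
        spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}
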
267 /**
268 * blk_sync_queue - cancel any pending callbacks on a queue
269 * @q: the queue
270 *
271 * Description:
272 * The block layer may perform asynchronous callback activity
273 * on a queue, such as calling the unplug function after a timeout.
274 * A block device may call blk_sync_queue to ensure that any
275 * such activity is cancelled, thus allowing it to release resources
276 * that the callbacks might use. The caller must already have made sure
277 * that its ->make_request_fn will not re-add plugging prior to calling
278 * this function.
279 *
280 * This function does not cancel any asynchronous activity arising
281 * out of elevator or throttling code. That would require elevator_exit()
282 * and blk_throtl_exit() to be called with queue lock initialized.
283 *
284 */
285 void blk_sync_queue(struct request_queue *q)
286 {
287 del_timer_sync(&q->timeout);
288 cancel_delayed_work_sync(&q->delay_work);
289 }
290 EXPORT_SYMBOL(blk_sync_queue);
291
292 /**
293 * __blk_run_queue - run a single device queue
294 * @q: The queue to run
295 *
296 * Description:
297 * See @blk_run_queue. This variant must be called with the queue lock
298 * held and interrupts disabled.
299 */
300 void __blk_run_queue(struct request_queue *q)
301 {
302 if (unlikely(blk_queue_stopped(q)))
303 return;
304
305 q->request_fn(q);
306 }
307 EXPORT_SYMBOL(__blk_run_queue);
308
309 /**
310 * blk_run_queue_async - run a single device queue in workqueue context
311 * @q: The queue to run
312 *
313 * Description:
314 * Tells kblockd to perform the equivalent of @blk_run_queue on our
315 * behalf.
316 */
317 void blk_run_queue_async(struct request_queue *q)
318 {
319 if (likely(!blk_queue_stopped(q))) {
320 __cancel_delayed_work(&q->delay_work);
321 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
322 }
323 }
324 EXPORT_SYMBOL(blk_run_queue_async);
325
326 /**
327 * blk_run_queue - run a single device queue
328 * @q: The queue to run
329 *
330 * Description:
331 * Invoke request handling on this queue, if it has pending work to do.
332 * May be used to restart queueing when a request has completed.
333 */
334 void blk_run_queue(struct request_queue *q)
335 {
336 unsigned long flags;
337
338 spin_lock_irqsave(q->queue_lock, flags);
339 __blk_run_queue(q);
340 spin_unlock_irqrestore(q->queue_lock, flags);
341 }
342 EXPORT_SYMBOL(blk_run_queue);
343
344 void blk_put_queue(struct request_queue *q)
345 {
346 kobject_put(&q->kobj);
347 }
348 EXPORT_SYMBOL(blk_put_queue);
349
350 /*
351 * Note: If a driver supplied the queue lock, it should not zap that lock
352 * unexpectedly as some queue cleanup components like elevator_exit() and
353 * blk_throtl_exit() need queue lock.
354 */
355 void blk_cleanup_queue(struct request_queue *q)
356 {
357 /*
358 * We know we have process context here, so we can be a little
359 * cautious and ensure that pending block actions on this device
360 * are done before moving on. Going into this function, we should
361 * not have processes doing IO to this device.
362 */
363 blk_sync_queue(q);
364
365 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
366 mutex_lock(&q->sysfs_lock);
367 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
368 mutex_unlock(&q->sysfs_lock);
369
370 if (q->elevator)
371 elevator_exit(q->elevator);
372
373 blk_throtl_exit(q);
374
375 blk_put_queue(q);
376 }
377 EXPORT_SYMBOL(blk_cleanup_queue);
378
379 static int blk_init_free_list(struct request_queue *q)
380 {
381 struct request_list *rl = &q->rq;
382
383 if (unlikely(rl->rq_pool))
384 return 0;
385
386 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
387 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
388 rl->elvpriv = 0;
389 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
390 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
391
392 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
393 mempool_free_slab, request_cachep, q->node);
394
395 if (!rl->rq_pool)
396 return -ENOMEM;
397
398 return 0;
399 }
400
401 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
402 {
403 return blk_alloc_queue_node(gfp_mask, -1);
404 }
405 EXPORT_SYMBOL(blk_alloc_queue);
406
407 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
408 {
409 struct request_queue *q;
410 int err;
411
412 q = kmem_cache_alloc_node(blk_requestq_cachep,
413 gfp_mask | __GFP_ZERO, node_id);
414 if (!q)
415 return NULL;
416
417 q->backing_dev_info.ra_pages =
418 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
419 q->backing_dev_info.state = 0;
420 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
421 q->backing_dev_info.name = "block";
422
423 err = bdi_init(&q->backing_dev_info);
424 if (err) {
425 kmem_cache_free(blk_requestq_cachep, q);
426 return NULL;
427 }
428
429 if (blk_throtl_init(q)) {
430 kmem_cache_free(blk_requestq_cachep, q);
431 return NULL;
432 }
433
434 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
435 laptop_mode_timer_fn, (unsigned long) q);
436 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
437 INIT_LIST_HEAD(&q->timeout_list);
438 INIT_LIST_HEAD(&q->flush_queue[0]);
439 INIT_LIST_HEAD(&q->flush_queue[1]);
440 INIT_LIST_HEAD(&q->flush_data_in_flight);
441 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
442
443 kobject_init(&q->kobj, &blk_queue_ktype);
444
445 mutex_init(&q->sysfs_lock);
446 spin_lock_init(&q->__queue_lock);
447
448 /*
449 * By default initialize queue_lock to internal lock and driver can
450 * override it later if need be.
451 */
452 q->queue_lock = &q->__queue_lock;
453
454 return q;
455 }
456 EXPORT_SYMBOL(blk_alloc_queue_node);
457
458 /**
459 * blk_init_queue - prepare a request queue for use with a block device
460 * @rfn: The function to be called to process requests that have been
461 * placed on the queue.
462 * @lock: Request queue spin lock
463 *
464 * Description:
465 * If a block device wishes to use the standard request handling procedures,
466 * which sorts requests and coalesces adjacent requests, then it must
467 * call blk_init_queue(). The function @rfn will be called when there
468 * are requests on the queue that need to be processed. If the device
469 * supports plugging, then @rfn may not be called immediately when requests
470 * are available on the queue, but may be called at some time later instead.
471 * Plugged queues are generally unplugged when a buffer belonging to one
472 * of the requests on the queue is needed, or due to memory pressure.
473 *
474 * @rfn is not required, or even expected, to remove all requests off the
475 * queue, but only as many as it can handle at a time. If it does leave
476 * requests on the queue, it is responsible for arranging that the requests
477 * get dealt with eventually.
478 *
479 * The queue spin lock must be held while manipulating the requests on the
480 * request queue; this lock will be taken also from interrupt context, so irq
481 * disabling is needed for it.
482 *
483 * Function returns a pointer to the initialized request queue, or %NULL if
484 * it didn't succeed.
485 *
486 * Note:
487 * blk_init_queue() must be paired with a blk_cleanup_queue() call
488 * when the block device is deactivated (such as at module unload).
489 **/
490
491 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
492 {
493 return blk_init_queue_node(rfn, lock, -1);
494 }
495 EXPORT_SYMBOL(blk_init_queue);
496
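/*
 * Illustrative sketch: the typical pairing of blk_init_queue() at probe time
 * with blk_cleanup_queue() at teardown, as required by the note above.
 * my_request_fn() and struct my_dev are hypothetical driver pieces.
 */
static int example_probe(struct my_dev *dev)
{
        spin_lock_init(&dev->lock);

        dev->queue = blk_init_queue(my_request_fn, &dev->lock);
        if (!dev->queue)
                return -ENOMEM;

        dev->queue->queuedata = dev;
        blk_queue_logical_block_size(dev->queue, 512);
        return 0;
}

static void example_remove(struct my_dev *dev)
{
        blk_cleanup_queue(dev->queue);
}
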
497 struct request_queue *
498 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
499 {
500 struct request_queue *uninit_q, *q;
501
502 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
503 if (!uninit_q)
504 return NULL;
505
506 q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
507 if (!q)
508 blk_cleanup_queue(uninit_q);
509
510 return q;
511 }
512 EXPORT_SYMBOL(blk_init_queue_node);
513
514 struct request_queue *
515 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
516 spinlock_t *lock)
517 {
518 return blk_init_allocated_queue_node(q, rfn, lock, -1);
519 }
520 EXPORT_SYMBOL(blk_init_allocated_queue);
521
522 struct request_queue *
523 blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
524 spinlock_t *lock, int node_id)
525 {
526 if (!q)
527 return NULL;
528
529 q->node = node_id;
530 if (blk_init_free_list(q))
531 return NULL;
532
533 q->request_fn = rfn;
534 q->prep_rq_fn = NULL;
535 q->unprep_rq_fn = NULL;
536 q->queue_flags = QUEUE_FLAG_DEFAULT;
537
538 /* Override internal queue lock with supplied lock pointer */
539 if (lock)
540 q->queue_lock = lock;
541
542 /*
543 * This also sets hw/phys segments, boundary and size
544 */
545 blk_queue_make_request(q, __make_request);
546
547 q->sg_reserved_size = INT_MAX;
548
549 /*
550 * all done
551 */
552 if (!elevator_init(q, NULL)) {
553 blk_queue_congestion_threshold(q);
554 return q;
555 }
556
557 return NULL;
558 }
559 EXPORT_SYMBOL(blk_init_allocated_queue_node);
560
561 int blk_get_queue(struct request_queue *q)
562 {
563 if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
564 kobject_get(&q->kobj);
565 return 0;
566 }
567
568 return 1;
569 }
570 EXPORT_SYMBOL(blk_get_queue);
571
572 static inline void blk_free_request(struct request_queue *q, struct request *rq)
573 {
574 if (rq->cmd_flags & REQ_ELVPRIV)
575 elv_put_request(q, rq);
576 mempool_free(rq, q->rq.rq_pool);
577 }
578
579 static struct request *
580 blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
581 {
582 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
583
584 if (!rq)
585 return NULL;
586
587 blk_rq_init(q, rq);
588
589 rq->cmd_flags = flags | REQ_ALLOCED;
590
591 if (priv) {
592 if (unlikely(elv_set_request(q, rq, gfp_mask))) {
593 mempool_free(rq, q->rq.rq_pool);
594 return NULL;
595 }
596 rq->cmd_flags |= REQ_ELVPRIV;
597 }
598
599 return rq;
600 }
601
602 /*
603 * ioc_batching returns true if the ioc is a valid batching io_context and
604 * should be given priority access to a request.
605 */
606 static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
607 {
608 if (!ioc)
609 return 0;
610
611 /*
612 * Make sure the process is able to allocate at least 1 request
613 * even if the batch times out, otherwise we could theoretically
614 * lose wakeups.
615 */
616 return ioc->nr_batch_requests == q->nr_batching ||
617 (ioc->nr_batch_requests > 0
618 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
619 }
620
621 /*
622 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
623 * will cause the process to be a "batcher" on all queues in the system. This
624 * is the behaviour we want though - once it gets a wakeup it should be given
625 * a nice run.
626 */
627 static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
628 {
629 if (!ioc || ioc_batching(q, ioc))
630 return;
631
632 ioc->nr_batch_requests = q->nr_batching;
633 ioc->last_waited = jiffies;
634 }
635
636 static void __freed_request(struct request_queue *q, int sync)
637 {
638 struct request_list *rl = &q->rq;
639
640 if (rl->count[sync] < queue_congestion_off_threshold(q))
641 blk_clear_queue_congested(q, sync);
642
643 if (rl->count[sync] + 1 <= q->nr_requests) {
644 if (waitqueue_active(&rl->wait[sync]))
645 wake_up(&rl->wait[sync]);
646
647 blk_clear_queue_full(q, sync);
648 }
649 }
650
651 /*
652 * A request has just been released. Account for it, update the full and
653 * congestion status, wake up any waiters. Called under q->queue_lock.
654 */
655 static void freed_request(struct request_queue *q, int sync, int priv)
656 {
657 struct request_list *rl = &q->rq;
658
659 rl->count[sync]--;
660 if (priv)
661 rl->elvpriv--;
662
663 __freed_request(q, sync);
664
665 if (unlikely(rl->starved[sync ^ 1]))
666 __freed_request(q, sync ^ 1);
667 }
668
669 /*
670 * Determine if elevator data should be initialized when allocating the
671 * request associated with @bio.
672 */
673 static bool blk_rq_should_init_elevator(struct bio *bio)
674 {
675 if (!bio)
676 return true;
677
678 /*
679 * Flush requests do not use the elevator so skip initialization.
680 * This allows a request to share the flush and elevator data.
681 */
682 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
683 return false;
684
685 return true;
686 }
687
688 /*
689 * Get a free request, queue_lock must be held.
690 * Returns NULL on failure, with queue_lock held.
691 * Returns !NULL on success, with queue_lock *not held*.
692 */
693 static struct request *get_request(struct request_queue *q, int rw_flags,
694 struct bio *bio, gfp_t gfp_mask)
695 {
696 struct request *rq = NULL;
697 struct request_list *rl = &q->rq;
698 struct io_context *ioc = NULL;
699 const bool is_sync = rw_is_sync(rw_flags) != 0;
700 int may_queue, priv = 0;
701
702 may_queue = elv_may_queue(q, rw_flags);
703 if (may_queue == ELV_MQUEUE_NO)
704 goto rq_starved;
705
706 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
707 if (rl->count[is_sync]+1 >= q->nr_requests) {
708 ioc = current_io_context(GFP_ATOMIC, q->node);
709 /*
710 * The queue will fill after this allocation, so set
711 * it as full, and mark this process as "batching".
712 * This process will be allowed to complete a batch of
713 * requests, others will be blocked.
714 */
715 if (!blk_queue_full(q, is_sync)) {
716 ioc_set_batching(q, ioc);
717 blk_set_queue_full(q, is_sync);
718 } else {
719 if (may_queue != ELV_MQUEUE_MUST
720 && !ioc_batching(q, ioc)) {
721 /*
722 * The queue is full and the allocating
723 * process is not a "batcher", and not
724 * exempted by the IO scheduler
725 */
726 goto out;
727 }
728 }
729 }
730 blk_set_queue_congested(q, is_sync);
731 }
732
733 /*
734 * Only allow batching queuers to allocate up to 50% over the defined
735 * limit of requests, otherwise we could have thousands of requests
736 * allocated with any setting of ->nr_requests
737 */
738 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
739 goto out;
740
741 rl->count[is_sync]++;
742 rl->starved[is_sync] = 0;
743
744 if (blk_rq_should_init_elevator(bio)) {
745 priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
746 if (priv)
747 rl->elvpriv++;
748 }
749
750 if (blk_queue_io_stat(q))
751 rw_flags |= REQ_IO_STAT;
752 spin_unlock_irq(q->queue_lock);
753
754 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
755 if (unlikely(!rq)) {
756 /*
757 * Allocation failed presumably due to memory. Undo anything
758 * we might have messed up.
759 *
760 * Allocating task should really be put onto the front of the
761 * wait queue, but this is pretty rare.
762 */
763 spin_lock_irq(q->queue_lock);
764 freed_request(q, is_sync, priv);
765
766 /*
767 * in the very unlikely event that allocation failed and no
768 * requests for this direction were pending, mark us starved
769 * so that freeing of a request in the other direction will
770 * notice us. Another possible fix would be to split the
771 * rq mempool into READ and WRITE
772 */
773 rq_starved:
774 if (unlikely(rl->count[is_sync] == 0))
775 rl->starved[is_sync] = 1;
776
777 goto out;
778 }
779
780 /*
781 * ioc may be NULL here, and ioc_batching will be false. That's
782 * OK, if the queue is under the request limit then requests need
783 * not count toward the nr_batch_requests limit. There will always
784 * be some limit enforced by BLK_BATCH_TIME.
785 */
786 if (ioc_batching(q, ioc))
787 ioc->nr_batch_requests--;
788
789 trace_block_getrq(q, bio, rw_flags & 1);
790 out:
791 return rq;
792 }
793
794 /*
795 * No available requests for this queue, wait for some requests to become
796 * available.
797 *
798 * Called with q->queue_lock held, and returns with it unlocked.
799 */
800 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
801 struct bio *bio)
802 {
803 const bool is_sync = rw_is_sync(rw_flags) != 0;
804 struct request *rq;
805
806 rq = get_request(q, rw_flags, bio, GFP_NOIO);
807 while (!rq) {
808 DEFINE_WAIT(wait);
809 struct io_context *ioc;
810 struct request_list *rl = &q->rq;
811
812 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
813 TASK_UNINTERRUPTIBLE);
814
815 trace_block_sleeprq(q, bio, rw_flags & 1);
816
817 spin_unlock_irq(q->queue_lock);
818 io_schedule();
819
820 /*
821 * After sleeping, we become a "batching" process and
822 * will be able to allocate at least one request, and
823 * up to a big batch of them for a small period of time.
824 * See ioc_batching, ioc_set_batching
825 */
826 ioc = current_io_context(GFP_NOIO, q->node);
827 ioc_set_batching(q, ioc);
828
829 spin_lock_irq(q->queue_lock);
830 finish_wait(&rl->wait[is_sync], &wait);
831
832 rq = get_request(q, rw_flags, bio, GFP_NOIO);
833 }
834
835 return rq;
836 }
837
838 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
839 {
840 struct request *rq;
841
842 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
843 return NULL;
844
845 BUG_ON(rw != READ && rw != WRITE);
846
847 spin_lock_irq(q->queue_lock);
848 if (gfp_mask & __GFP_WAIT) {
849 rq = get_request_wait(q, rw, NULL);
850 } else {
851 rq = get_request(q, rw, NULL, gfp_mask);
852 if (!rq)
853 spin_unlock_irq(q->queue_lock);
854 }
855 /* q->queue_lock is unlocked at this point */
856
857 return rq;
858 }
859 EXPORT_SYMBOL(blk_get_request);
860
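/*
 * Illustrative sketch: allocating a request with blk_get_request() and
 * pushing it through the queue with blk_execute_rq(). Error handling is
 * minimal and the command bytes are hypothetical.
 */
static int example_send_command(struct request_queue *q, struct gendisk *disk)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd[0] = 0x00;              /* hypothetical opcode */
        rq->cmd_len = 6;
        rq->timeout = 10 * HZ;

        err = blk_execute_rq(q, disk, rq, 0);
        blk_put_request(rq);
        return err;
}
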
861 /**
862 * blk_make_request - given a bio, allocate a corresponding struct request.
863 * @q: target request queue
864 * @bio: The bio describing the memory mappings that will be submitted for IO.
865 * It may be a chained-bio properly constructed by block/bio layer.
866 * @gfp_mask: gfp flags to be used for memory allocation
867 *
868 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
869 * type commands, where the struct request needs to be further initialized by
870 * the caller. It is passed a &struct bio, which describes the memory info of
871 * the I/O transfer.
872 *
873 * The caller of blk_make_request must make sure that bi_io_vec
874 * are set to describe the memory buffers, and that bio_data_dir() will return
875 * the needed direction of the request. (All bio's in the passed bio-chain
876 * must be set accordingly.)
877 *
878 * If called under non-sleepable conditions, mapped bio buffers must not
879 * need bouncing, by calling the appropriate masked or flagged allocator,
880 * suitable for the target device. Otherwise the call to blk_queue_bounce will
881 * BUG.
882 *
883 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
884 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
885 * anything but the first bio in the chain. Otherwise you risk waiting for IO
886 * completion of a bio that hasn't been submitted yet, thus resulting in a
887 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
888 * of bio_alloc(), as that avoids the mempool deadlock.
889 * If possible a big IO should be split into smaller parts when allocation
890 * fails. Partial allocation should not be an error, or you risk a live-lock.
891 */
892 struct request *blk_make_request(struct request_queue *q, struct bio *bio,
893 gfp_t gfp_mask)
894 {
895 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
896
897 if (unlikely(!rq))
898 return ERR_PTR(-ENOMEM);
899
900 for_each_bio(bio) {
901 struct bio *bounce_bio = bio;
902 int ret;
903
904 blk_queue_bounce(q, &bounce_bio);
905 ret = blk_rq_append_bio(q, rq, bounce_bio);
906 if (unlikely(ret)) {
907 blk_put_request(rq);
908 return ERR_PTR(ret);
909 }
910 }
911
912 return rq;
913 }
914 EXPORT_SYMBOL(blk_make_request);
915
916 /**
917 * blk_requeue_request - put a request back on queue
918 * @q: request queue where request should be inserted
919 * @rq: request to be inserted
920 *
921 * Description:
922 * Drivers often keep queueing requests until the hardware cannot accept
923 * more, when that condition happens we need to put the request back
924 * on the queue. Must be called with queue lock held.
925 */
926 void blk_requeue_request(struct request_queue *q, struct request *rq)
927 {
928 blk_delete_timer(rq);
929 blk_clear_rq_complete(rq);
930 trace_block_rq_requeue(q, rq);
931
932 if (blk_rq_tagged(rq))
933 blk_queue_end_tag(q, rq);
934
935 BUG_ON(blk_queued_rq(rq));
936
937 elv_requeue_request(q, rq);
938 }
939 EXPORT_SYMBOL(blk_requeue_request);
940
941 static void add_acct_request(struct request_queue *q, struct request *rq,
942 int where)
943 {
944 drive_stat_acct(rq, 1);
945 __elv_add_request(q, rq, where);
946 }
947
948 /**
949 * blk_insert_request - insert a special request into a request queue
950 * @q: request queue where request should be inserted
951 * @rq: request to be inserted
952 * @at_head: insert request at head or tail of queue
953 * @data: private data
954 *
955 * Description:
956 * Many block devices need to execute commands asynchronously, so they don't
957 * block the whole kernel from preemption during request execution. This is
958 * accomplished normally by inserting artificial requests tagged as
959 * REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
960 * be scheduled for actual execution by the request queue.
961 *
962 * We have the option of inserting the head or the tail of the queue.
963 * Typically we use the tail for new ioctls and so forth. We use the head
964 * of the queue for things like a QUEUE_FULL message from a device, or a
965 * host that is unable to accept a particular command.
966 */
967 void blk_insert_request(struct request_queue *q, struct request *rq,
968 int at_head, void *data)
969 {
970 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
971 unsigned long flags;
972
973 /*
974 * tell I/O scheduler that this isn't a regular read/write (ie it
975 * must not attempt merges on this) and that it acts as a soft
976 * barrier
977 */
978 rq->cmd_type = REQ_TYPE_SPECIAL;
979
980 rq->special = data;
981
982 spin_lock_irqsave(q->queue_lock, flags);
983
984 /*
985 * If command is tagged, release the tag
986 */
987 if (blk_rq_tagged(rq))
988 blk_queue_end_tag(q, rq);
989
990 add_acct_request(q, rq, where);
991 __blk_run_queue(q);
992 spin_unlock_irqrestore(q->queue_lock, flags);
993 }
994 EXPORT_SYMBOL(blk_insert_request);
995
996 static void part_round_stats_single(int cpu, struct hd_struct *part,
997 unsigned long now)
998 {
999 if (now == part->stamp)
1000 return;
1001
1002 if (part_in_flight(part)) {
1003 __part_stat_add(cpu, part, time_in_queue,
1004 part_in_flight(part) * (now - part->stamp));
1005 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1006 }
1007 part->stamp = now;
1008 }
1009
1010 /**
1011 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1012 * @cpu: cpu number for stats access
1013 * @part: target partition
1014 *
1015 * The average IO queue length and utilisation statistics are maintained
1016 * by observing the current state of the queue length and the amount of
1017 * time it has been in this state for.
1018 *
1019 * Normally, that accounting is done on IO completion, but that can result
1020 * in more than a second's worth of IO being accounted for within any one
1021 * second, leading to >100% utilisation. To deal with that, we call this
1022 * function to do a round-off before returning the results when reading
1023 * /proc/diskstats. This accounts immediately for all queue usage up to
1024 * the current jiffies and restarts the counters again.
1025 */
1026 void part_round_stats(int cpu, struct hd_struct *part)
1027 {
1028 unsigned long now = jiffies;
1029
1030 if (part->partno)
1031 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1032 part_round_stats_single(cpu, part, now);
1033 }
1034 EXPORT_SYMBOL_GPL(part_round_stats);
1035
1036 /*
1037 * queue lock must be held
1038 */
1039 void __blk_put_request(struct request_queue *q, struct request *req)
1040 {
1041 if (unlikely(!q))
1042 return;
1043 if (unlikely(--req->ref_count))
1044 return;
1045
1046 elv_completed_request(q, req);
1047
1048 /* this is a bio leak */
1049 WARN_ON(req->bio != NULL);
1050
1051 /*
1052 * Request may not have originated from ll_rw_blk. If not,
1053 * it didn't come out of our reserved rq pools
1054 */
1055 if (req->cmd_flags & REQ_ALLOCED) {
1056 int is_sync = rq_is_sync(req) != 0;
1057 int priv = req->cmd_flags & REQ_ELVPRIV;
1058
1059 BUG_ON(!list_empty(&req->queuelist));
1060 BUG_ON(!hlist_unhashed(&req->hash));
1061
1062 blk_free_request(q, req);
1063 freed_request(q, is_sync, priv);
1064 }
1065 }
1066 EXPORT_SYMBOL_GPL(__blk_put_request);
1067
1068 void blk_put_request(struct request *req)
1069 {
1070 unsigned long flags;
1071 struct request_queue *q = req->q;
1072
1073 spin_lock_irqsave(q->queue_lock, flags);
1074 __blk_put_request(q, req);
1075 spin_unlock_irqrestore(q->queue_lock, flags);
1076 }
1077 EXPORT_SYMBOL(blk_put_request);
1078
1079 /**
1080 * blk_add_request_payload - add a payload to a request
1081 * @rq: request to update
1082 * @page: page backing the payload
1083 * @len: length of the payload.
1084 *
1085 * This allows a block driver to later add a payload to an already
1086 * submitted request. The driver needs to take care of freeing the payload
1087 * itself.
1088 *
1089 * Note that this is a quite horrible hack and nothing but handling of
1090 * discard requests should ever use it.
1091 */
1092 void blk_add_request_payload(struct request *rq, struct page *page,
1093 unsigned int len)
1094 {
1095 struct bio *bio = rq->bio;
1096
1097 bio->bi_io_vec->bv_page = page;
1098 bio->bi_io_vec->bv_offset = 0;
1099 bio->bi_io_vec->bv_len = len;
1100
1101 bio->bi_size = len;
1102 bio->bi_vcnt = 1;
1103 bio->bi_phys_segments = 1;
1104
1105 rq->__data_len = rq->resid_len = len;
1106 rq->nr_phys_segments = 1;
1107 rq->buffer = bio_data(bio);
1108 }
1109 EXPORT_SYMBOL_GPL(blk_add_request_payload);
1110
1111 static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1112 struct bio *bio)
1113 {
1114 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1115
1116 if (!ll_back_merge_fn(q, req, bio))
1117 return false;
1118
1119 trace_block_bio_backmerge(q, bio);
1120
1121 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1122 blk_rq_set_mixed_merge(req);
1123
1124 req->biotail->bi_next = bio;
1125 req->biotail = bio;
1126 req->__data_len += bio->bi_size;
1127 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1128
1129 drive_stat_acct(req, 0);
1130 elv_bio_merged(q, req, bio);
1131 return true;
1132 }
1133
1134 static bool bio_attempt_front_merge(struct request_queue *q,
1135 struct request *req, struct bio *bio)
1136 {
1137 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1138
1139 if (!ll_front_merge_fn(q, req, bio))
1140 return false;
1141
1142 trace_block_bio_frontmerge(q, bio);
1143
1144 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1145 blk_rq_set_mixed_merge(req);
1146
1147 bio->bi_next = req->bio;
1148 req->bio = bio;
1149
1150 /*
1151 * may not be valid. if the low level driver said
1152 * it didn't need a bounce buffer then it better
1153 * not touch req->buffer either...
1154 */
1155 req->buffer = bio_data(bio);
1156 req->__sector = bio->bi_sector;
1157 req->__data_len += bio->bi_size;
1158 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1159
1160 drive_stat_acct(req, 0);
1161 elv_bio_merged(q, req, bio);
1162 return true;
1163 }
1164
1165 /*
1166 * Attempts to merge with the plugged list in the current process. Returns
1167 * true if merge was successful, otherwise false.
1168 */
1169 static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
1170 struct bio *bio, unsigned int *request_count)
1171 {
1172 struct blk_plug *plug;
1173 struct request *rq;
1174 bool ret = false;
1175
1176 plug = tsk->plug;
1177 if (!plug)
1178 goto out;
1179 *request_count = 0;
1180
1181 list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1182 int el_ret;
1183
1184 (*request_count)++;
1185
1186 if (rq->q != q)
1187 continue;
1188
1189 el_ret = elv_try_merge(rq, bio);
1190 if (el_ret == ELEVATOR_BACK_MERGE) {
1191 ret = bio_attempt_back_merge(q, rq, bio);
1192 if (ret)
1193 break;
1194 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1195 ret = bio_attempt_front_merge(q, rq, bio);
1196 if (ret)
1197 break;
1198 }
1199 }
1200 out:
1201 return ret;
1202 }
1203
1204 void init_request_from_bio(struct request *req, struct bio *bio)
1205 {
1206 req->cpu = bio->bi_comp_cpu;
1207 req->cmd_type = REQ_TYPE_FS;
1208
1209 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1210 if (bio->bi_rw & REQ_RAHEAD)
1211 req->cmd_flags |= REQ_FAILFAST_MASK;
1212
1213 req->errors = 0;
1214 req->__sector = bio->bi_sector;
1215 req->ioprio = bio_prio(bio);
1216 blk_rq_bio_prep(req->q, req, bio);
1217 }
1218
1219 static int __make_request(struct request_queue *q, struct bio *bio)
1220 {
1221 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1222 struct blk_plug *plug;
1223 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1224 struct request *req;
1225 unsigned int request_count = 0;
1226
1227 /*
1228 * low level driver can indicate that it wants pages above a
1229 * certain limit bounced to low memory (ie for highmem, or even
1230 * ISA dma in theory)
1231 */
1232 blk_queue_bounce(q, &bio);
1233
1234 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1235 spin_lock_irq(q->queue_lock);
1236 where = ELEVATOR_INSERT_FLUSH;
1237 goto get_rq;
1238 }
1239
1240 /*
1241 * Check if we can merge with the plugged list before grabbing
1242 * any locks.
1243 */
1244 if (attempt_plug_merge(current, q, bio, &request_count))
1245 goto out;
1246
1247 spin_lock_irq(q->queue_lock);
1248
1249 el_ret = elv_merge(q, &req, bio);
1250 if (el_ret == ELEVATOR_BACK_MERGE) {
1251 if (bio_attempt_back_merge(q, req, bio)) {
1252 if (!attempt_back_merge(q, req))
1253 elv_merged_request(q, req, el_ret);
1254 goto out_unlock;
1255 }
1256 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1257 if (bio_attempt_front_merge(q, req, bio)) {
1258 if (!attempt_front_merge(q, req))
1259 elv_merged_request(q, req, el_ret);
1260 goto out_unlock;
1261 }
1262 }
1263
1264 get_rq:
1265 /*
1266 * This sync check and mask will be re-done in init_request_from_bio(),
1267 * but we need to set it earlier to expose the sync flag to the
1268 * rq allocator and io schedulers.
1269 */
1270 rw_flags = bio_data_dir(bio);
1271 if (sync)
1272 rw_flags |= REQ_SYNC;
1273
1274 /*
1275 * Grab a free request. This might sleep but cannot fail.
1276 * Returns with the queue unlocked.
1277 */
1278 req = get_request_wait(q, rw_flags, bio);
1279
1280 /*
1281 * After dropping the lock and possibly sleeping here, our request
1282 * may now be mergeable after it had proven unmergeable (above).
1283 * We don't worry about that case for efficiency. It won't happen
1284 * often, and the elevators are able to handle it.
1285 */
1286 init_request_from_bio(req, bio);
1287
1288 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
1289 bio_flagged(bio, BIO_CPU_AFFINE))
1290 req->cpu = raw_smp_processor_id();
1291
1292 plug = current->plug;
1293 if (plug) {
1294 /*
1295 * If this is the first request added after a plug, fire
1296 * off a plug trace. If others have been added before, check
1297 * if we have multiple devices in this plug. If so, make a
1298 * note to sort the list before dispatch.
1299 */
1300 if (list_empty(&plug->list))
1301 trace_block_plug(q);
1302 else if (!plug->should_sort) {
1303 struct request *__rq;
1304
1305 __rq = list_entry_rq(plug->list.prev);
1306 if (__rq->q != q)
1307 plug->should_sort = 1;
1308 }
1309 if (request_count >= BLK_MAX_REQUEST_COUNT)
1310 blk_flush_plug_list(plug, false);
1311 list_add_tail(&req->queuelist, &plug->list);
1312 drive_stat_acct(req, 1);
1313 } else {
1314 spin_lock_irq(q->queue_lock);
1315 add_acct_request(q, req, where);
1316 __blk_run_queue(q);
1317 out_unlock:
1318 spin_unlock_irq(q->queue_lock);
1319 }
1320 out:
1321 return 0;
1322 }
1323
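/*
 * Illustrative sketch: how a submitter batches bios on the per-task plug
 * that attempt_plug_merge() and the plug branch in __make_request() operate
 * on. The bios[] array is hypothetical.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(bios[i]->bi_rw, bios[i]);
        /* Requests gathered on the plug list are flushed to the queue(s). */
        blk_finish_plug(&plug);
}
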
1324 /*
1325 * If bio->bi_bdev is a partition, remap the location
1326 */
1327 static inline void blk_partition_remap(struct bio *bio)
1328 {
1329 struct block_device *bdev = bio->bi_bdev;
1330
1331 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1332 struct hd_struct *p = bdev->bd_part;
1333
1334 bio->bi_sector += p->start_sect;
1335 bio->bi_bdev = bdev->bd_contains;
1336
1337 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1338 bdev->bd_dev,
1339 bio->bi_sector - p->start_sect);
1340 }
1341 }
1342
1343 static void handle_bad_sector(struct bio *bio)
1344 {
1345 char b[BDEVNAME_SIZE];
1346
1347 printk(KERN_INFO "attempt to access beyond end of device\n");
1348 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1349 bdevname(bio->bi_bdev, b),
1350 bio->bi_rw,
1351 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1352 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1353
1354 set_bit(BIO_EOF, &bio->bi_flags);
1355 }
1356
1357 #ifdef CONFIG_FAIL_MAKE_REQUEST
1358
1359 static DECLARE_FAULT_ATTR(fail_make_request);
1360
1361 static int __init setup_fail_make_request(char *str)
1362 {
1363 return setup_fault_attr(&fail_make_request, str);
1364 }
1365 __setup("fail_make_request=", setup_fail_make_request);
1366
1367 static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1368 {
1369 return part->make_it_fail && should_fail(&fail_make_request, bytes);
1370 }
1371
1372 static int __init fail_make_request_debugfs(void)
1373 {
1374 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1375 NULL, &fail_make_request);
1376
1377 return IS_ERR(dir) ? PTR_ERR(dir) : 0;
1378 }
1379
1380 late_initcall(fail_make_request_debugfs);
1381
1382 #else /* CONFIG_FAIL_MAKE_REQUEST */
1383
1384 static inline bool should_fail_request(struct hd_struct *part,
1385 unsigned int bytes)
1386 {
1387 return false;
1388 }
1389
1390 #endif /* CONFIG_FAIL_MAKE_REQUEST */
1391
1392 /*
1393 * Check whether this bio extends beyond the end of the device.
1394 */
1395 static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1396 {
1397 sector_t maxsector;
1398
1399 if (!nr_sectors)
1400 return 0;
1401
1402 /* Test device or partition size, when known. */
1403 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1404 if (maxsector) {
1405 sector_t sector = bio->bi_sector;
1406
1407 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1408 /*
1409 * This may well happen - the kernel calls bread()
1410 * without checking the size of the device, e.g., when
1411 * mounting a device.
1412 */
1413 handle_bad_sector(bio);
1414 return 1;
1415 }
1416 }
1417
1418 return 0;
1419 }
1420
1421 /**
1422 * generic_make_request - hand a buffer to its device driver for I/O
1423 * @bio: The bio describing the location in memory and on the device.
1424 *
1425 * generic_make_request() is used to make I/O requests of block
1426 * devices. It is passed a &struct bio, which describes the I/O that needs
1427 * to be done.
1428 *
1429 * generic_make_request() does not return any status. The
1430 * success/failure status of the request, along with notification of
1431 * completion, is delivered asynchronously through the bio->bi_end_io
1432 * function described (one day) elsewhere.
1433 *
1434 * The caller of generic_make_request must make sure that bi_io_vec
1435 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1436 * set to describe the device address, and the
1437 * bi_end_io and optionally bi_private are set to describe how
1438 * completion notification should be signaled.
1439 *
1440 * generic_make_request and the drivers it calls may use bi_next if this
1441 * bio happens to be merged with someone else, and may change bi_bdev and
1442 * bi_sector for remaps as it sees fit. So the values of these fields
1443 * should NOT be depended on after the call to generic_make_request.
1444 */
1445 static inline void __generic_make_request(struct bio *bio)
1446 {
1447 struct request_queue *q;
1448 sector_t old_sector;
1449 int ret, nr_sectors = bio_sectors(bio);
1450 dev_t old_dev;
1451 int err = -EIO;
1452
1453 might_sleep();
1454
1455 if (bio_check_eod(bio, nr_sectors))
1456 goto end_io;
1457
1458 /*
1459 * Resolve the mapping until finished. (drivers are
1460 * still free to implement/resolve their own stacking
1461 * by explicitly returning 0)
1462 *
1463 * NOTE: we don't repeat the blk_size check for each new device.
1464 * Stacking drivers are expected to know what they are doing.
1465 */
1466 old_sector = -1;
1467 old_dev = 0;
1468 do {
1469 char b[BDEVNAME_SIZE];
1470 struct hd_struct *part;
1471
1472 q = bdev_get_queue(bio->bi_bdev);
1473 if (unlikely(!q)) {
1474 printk(KERN_ERR
1475 "generic_make_request: Trying to access "
1476 "nonexistent block-device %s (%Lu)\n",
1477 bdevname(bio->bi_bdev, b),
1478 (long long) bio->bi_sector);
1479 goto end_io;
1480 }
1481
1482 if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
1483 nr_sectors > queue_max_hw_sectors(q))) {
1484 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1485 bdevname(bio->bi_bdev, b),
1486 bio_sectors(bio),
1487 queue_max_hw_sectors(q));
1488 goto end_io;
1489 }
1490
1491 if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1492 goto end_io;
1493
1494 part = bio->bi_bdev->bd_part;
1495 if (should_fail_request(part, bio->bi_size) ||
1496 should_fail_request(&part_to_disk(part)->part0,
1497 bio->bi_size))
1498 goto end_io;
1499
1500 /*
1501 * If this device has partitions, remap block n
1502 * of partition p to block n+start(p) of the disk.
1503 */
1504 blk_partition_remap(bio);
1505
1506 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1507 goto end_io;
1508
1509 if (old_sector != -1)
1510 trace_block_bio_remap(q, bio, old_dev, old_sector);
1511
1512 old_sector = bio->bi_sector;
1513 old_dev = bio->bi_bdev->bd_dev;
1514
1515 if (bio_check_eod(bio, nr_sectors))
1516 goto end_io;
1517
1518 /*
1519 * Filter flush bio's early so that make_request based
1520 * drivers without flush support don't have to worry
1521 * about them.
1522 */
1523 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1524 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1525 if (!nr_sectors) {
1526 err = 0;
1527 goto end_io;
1528 }
1529 }
1530
1531 if ((bio->bi_rw & REQ_DISCARD) &&
1532 (!blk_queue_discard(q) ||
1533 ((bio->bi_rw & REQ_SECURE) &&
1534 !blk_queue_secdiscard(q)))) {
1535 err = -EOPNOTSUPP;
1536 goto end_io;
1537 }
1538
1539 if (blk_throtl_bio(q, &bio))
1540 goto end_io;
1541
1542 /*
1543 * If bio is NULL, it has been throttled and will be submitted
1544 * later.
1545 */
1546 if (!bio)
1547 break;
1548
1549 trace_block_bio_queue(q, bio);
1550
1551 ret = q->make_request_fn(q, bio);
1552 } while (ret);
1553
1554 return;
1555
1556 end_io:
1557 bio_endio(bio, err);
1558 }
1559
1560 /*
1561 * We only want one ->make_request_fn to be active at a time,
1562 * else stack usage with stacked devices could be a problem.
1563 * So use current->bio_list to keep a list of requests
1564 * submitted by a make_request_fn function.
1565 * current->bio_list is also used as a flag to say if
1566 * generic_make_request is currently active in this task or not.
1567 * If it is NULL, then no make_request is active. If it is non-NULL,
1568 * then a make_request is active, and new requests should be added
1569 * at the tail
1570 */
1571 void generic_make_request(struct bio *bio)
1572 {
1573 struct bio_list bio_list_on_stack;
1574
1575 if (current->bio_list) {
1576 /* make_request is active */
1577 bio_list_add(current->bio_list, bio);
1578 return;
1579 }
1580 /* following loop may be a bit non-obvious, and so deserves some
1581 * explanation.
1582 * Before entering the loop, bio->bi_next is NULL (as all callers
1583 * ensure that) so we have a list with a single bio.
1584 * We pretend that we have just taken it off a longer list, so
1585 * we assign bio_list to a pointer to the bio_list_on_stack,
1586 * thus initialising the bio_list of new bios to be
1587 * added. __generic_make_request may indeed add some more bios
1588 * through a recursive call to generic_make_request. If it
1589 * did, we find a non-NULL value in bio_list and re-enter the loop
1590 * from the top. In this case we really did just take the bio
1591 * off the top of the list (no pretending) and so remove it from
1592 * bio_list, and call into __generic_make_request again.
1593 *
1594 * The loop was structured like this to make only one call to
1595 * __generic_make_request (which is important as it is large and
1596 * inlined) and to keep the structure simple.
1597 */
1598 BUG_ON(bio->bi_next);
1599 bio_list_init(&bio_list_on_stack);
1600 current->bio_list = &bio_list_on_stack;
1601 do {
1602 __generic_make_request(bio);
1603 bio = bio_list_pop(current->bio_list);
1604 } while (bio);
1605 current->bio_list = NULL; /* deactivate */
1606 }
1607 EXPORT_SYMBOL(generic_make_request);
1608
1609 /**
1610 * submit_bio - submit a bio to the block device layer for I/O
1611 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1612 * @bio: The &struct bio which describes the I/O
1613 *
1614 * submit_bio() is very similar in purpose to generic_make_request(), and
1615 * uses that function to do most of the work. Both are fairly rough
1616 * interfaces; @bio must be presetup and ready for I/O.
1617 *
1618 */
1619 void submit_bio(int rw, struct bio *bio)
1620 {
1621 int count = bio_sectors(bio);
1622
1623 bio->bi_rw |= rw;
1624
1625 /*
1626 * If it's a regular read/write or a barrier with data attached,
1627 * go through the normal accounting stuff before submission.
1628 */
1629 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
1630 if (rw & WRITE) {
1631 count_vm_events(PGPGOUT, count);
1632 } else {
1633 task_io_account_read(bio->bi_size);
1634 count_vm_events(PGPGIN, count);
1635 }
1636
1637 if (unlikely(block_dump)) {
1638 char b[BDEVNAME_SIZE];
1639 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1640 current->comm, task_pid_nr(current),
1641 (rw & WRITE) ? "WRITE" : "READ",
1642 (unsigned long long)bio->bi_sector,
1643 bdevname(bio->bi_bdev, b),
1644 count);
1645 }
1646 }
1647
1648 generic_make_request(bio);
1649 }
1650 EXPORT_SYMBOL(submit_bio);
1651
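/*
 * Illustrative sketch: building a single-page bio and handing it to
 * submit_bio(), with completion signalled through bi_end_io as described
 * above. example_end_io() and the on-stack completion are illustrative
 * only; return value and error checks are omitted for brevity.
 */
static void example_end_io(struct bio *bio, int error)
{
        struct completion *done = bio->bi_private;

        bio_put(bio);
        complete(done);
}

static int example_read_page(struct block_device *bdev, struct page *page,
                             sector_t sector)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = example_end_io;
        bio->bi_private = &done;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        submit_bio(READ, bio);
        wait_for_completion(&done);
        return 0;
}
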
1652 /**
1653 * blk_rq_check_limits - Helper function to check a request for the queue limit
1654 * @q: the queue
1655 * @rq: the request being checked
1656 *
1657 * Description:
1658 * @rq may have been made based on weaker limitations of upper-level queues
1659 * in request stacking drivers, and it may violate the limitation of @q.
1660 * Since the block layer and the underlying device driver trust @rq
1661 * after it is inserted to @q, it should be checked against @q before
1662 * the insertion using this generic function.
1663 *
1664 * This function should also be useful for request stacking drivers
1665 * in some cases below, so export this function.
1666 * Request stacking drivers like request-based dm may change the queue
1667 * limits while requests are in the queue (e.g. dm's table swapping).
1668 * Such request stacking drivers should check those requests against
1669 * the new queue limits again when they dispatch those requests,
1670 * although such checks are also done against the old queue limits
1671 * when submitting requests.
1672 */
1673 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1674 {
1675 if (rq->cmd_flags & REQ_DISCARD)
1676 return 0;
1677
1678 if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1679 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
1680 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1681 return -EIO;
1682 }
1683
1684 /*
1685 * queue's settings related to segment counting like q->bounce_pfn
1686 * may differ from that of other stacking queues.
1687 * Recalculate it to check the request correctly on this queue's
1688 * limitation.
1689 */
1690 blk_recalc_rq_segments(rq);
1691 if (rq->nr_phys_segments > queue_max_segments(q)) {
1692 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1693 return -EIO;
1694 }
1695
1696 return 0;
1697 }
1698 EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1699
1700 /**
1701 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1702 * @q: the queue to submit the request
1703 * @rq: the request being queued
1704 */
1705 int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1706 {
1707 unsigned long flags;
1708 int where = ELEVATOR_INSERT_BACK;
1709
1710 if (blk_rq_check_limits(q, rq))
1711 return -EIO;
1712
1713 if (rq->rq_disk &&
1714 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1715 return -EIO;
1716
1717 spin_lock_irqsave(q->queue_lock, flags);
1718
1719 /*
1720 * The request being submitted must be dequeued before calling this function
1721 * because it will be linked to another request_queue
1722 */
1723 BUG_ON(blk_queued_rq(rq));
1724
1725 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1726 where = ELEVATOR_INSERT_FLUSH;
1727
1728 add_acct_request(q, rq, where);
1729 spin_unlock_irqrestore(q->queue_lock, flags);
1730
1731 return 0;
1732 }
1733 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1734
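/*
 * Illustrative sketch: a request stacking driver (dm-style) handing a fully
 * prepared, unqueued clone to a lower queue and relying on the limit checks
 * done by blk_insert_cloned_request() above. 'clone' and 'lower_q' are
 * assumed to have been set up by the caller.
 */
static int example_dispatch_clone(struct request_queue *lower_q,
                                  struct request *clone)
{
        int ret;

        ret = blk_insert_cloned_request(lower_q, clone);
        if (ret)
                return ret;     /* e.g. -EIO: clone violates lower_q limits */

        return 0;
}
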
1735 /**
1736 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1737 * @rq: request to examine
1738 *
1739 * Description:
1740 * A request could be a merge of IOs which require different failure
1741 * handling. This function determines the number of bytes which
1742 * can be failed from the beginning of the request without
1743 * crossing into an area which needs to be retried further.
1744 *
1745 * Return:
1746 * The number of bytes to fail.
1747 *
1748 * Context:
1749 * queue_lock must be held.
1750 */
1751 unsigned int blk_rq_err_bytes(const struct request *rq)
1752 {
1753 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1754 unsigned int bytes = 0;
1755 struct bio *bio;
1756
1757 if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1758 return blk_rq_bytes(rq);
1759
1760 /*
1761 * Currently the only 'mixing' which can happen is between
1762 * different failfast types. We can safely fail portions
1763 * which have all the failfast bits that the first one has -
1764 * the ones which are at least as eager to fail as the first
1765 * one.
1766 */
1767 for (bio = rq->bio; bio; bio = bio->bi_next) {
1768 if ((bio->bi_rw & ff) != ff)
1769 break;
1770 bytes += bio->bi_size;
1771 }
1772
1773 /* this could lead to infinite loop */
1774 BUG_ON(blk_rq_bytes(rq) && !bytes);
1775 return bytes;
1776 }
1777 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1778
1779 static void blk_account_io_completion(struct request *req, unsigned int bytes)
1780 {
1781 if (blk_do_io_stat(req)) {
1782 const int rw = rq_data_dir(req);
1783 struct hd_struct *part;
1784 int cpu;
1785
1786 cpu = part_stat_lock();
1787 part = req->part;
1788 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1789 part_stat_unlock();
1790 }
1791 }
1792
1793 static void blk_account_io_done(struct request *req)
1794 {
1795 /*
1796 * Account IO completion. flush_rq isn't accounted as a
1797 * normal IO on queueing nor completion. Accounting the
1798 * containing request is enough.
1799 */
1800 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
1801 unsigned long duration = jiffies - req->start_time;
1802 const int rw = rq_data_dir(req);
1803 struct hd_struct *part;
1804 int cpu;
1805
1806 cpu = part_stat_lock();
1807 part = req->part;
1808
1809 part_stat_inc(cpu, part, ios[rw]);
1810 part_stat_add(cpu, part, ticks[rw], duration);
1811 part_round_stats(cpu, part);
1812 part_dec_in_flight(part, rw);
1813
1814 hd_struct_put(part);
1815 part_stat_unlock();
1816 }
1817 }
1818
1819 /**
1820 * blk_peek_request - peek at the top of a request queue
1821 * @q: request queue to peek at
1822 *
1823 * Description:
1824 * Return the request at the top of @q. The returned request
1825 * should be started using blk_start_request() before LLD starts
1826 * processing it.
1827 *
1828 * Return:
1829 * Pointer to the request at the top of @q if available. Null
1830 * otherwise.
1831 *
1832 * Context:
1833 * queue_lock must be held.
1834 */
1835 struct request *blk_peek_request(struct request_queue *q)
1836 {
1837 struct request *rq;
1838 int ret;
1839
1840 while ((rq = __elv_next_request(q)) != NULL) {
1841 if (!(rq->cmd_flags & REQ_STARTED)) {
1842 /*
1843 * This is the first time the device driver
1844 * sees this request (possibly after
1845 * requeueing). Notify IO scheduler.
1846 */
1847 if (rq->cmd_flags & REQ_SORTED)
1848 elv_activate_rq(q, rq);
1849
1850 /*
1851 * just mark as started even if we don't start
1852 * it, a request that has been delayed should
1853 * not be passed by new incoming requests
1854 */
1855 rq->cmd_flags |= REQ_STARTED;
1856 trace_block_rq_issue(q, rq);
1857 }
1858
1859 if (!q->boundary_rq || q->boundary_rq == rq) {
1860 q->end_sector = rq_end_sector(rq);
1861 q->boundary_rq = NULL;
1862 }
1863
1864 if (rq->cmd_flags & REQ_DONTPREP)
1865 break;
1866
1867 if (q->dma_drain_size && blk_rq_bytes(rq)) {
1868 /*
1869 * make sure space for the drain appears. We
1870 * know we can do this because max_hw_segments
1871 * has been adjusted to be one fewer than the
1872 * device can handle.
1873 */
1874 rq->nr_phys_segments++;
1875 }
1876
1877 if (!q->prep_rq_fn)
1878 break;
1879
1880 ret = q->prep_rq_fn(q, rq);
1881 if (ret == BLKPREP_OK) {
1882 break;
1883 } else if (ret == BLKPREP_DEFER) {
1884 /*
1885 * The request may have been (partially) prepped.
1886 * We need to keep this request at the front to
1887 * avoid resource deadlock. REQ_STARTED will
1888 * prevent other fs requests from passing this one.
1889 */
1890 if (q->dma_drain_size && blk_rq_bytes(rq) &&
1891 !(rq->cmd_flags & REQ_DONTPREP)) {
1892 /*
1893 * remove the space for the drain we added
1894 * so that we don't add it again
1895 */
1896 --rq->nr_phys_segments;
1897 }
1898
1899 rq = NULL;
1900 break;
1901 } else if (ret == BLKPREP_KILL) {
1902 rq->cmd_flags |= REQ_QUIET;
1903 /*
1904 * Mark this request as started so we don't trigger
1905 * any debug logic in the end I/O path.
1906 */
1907 blk_start_request(rq);
1908 __blk_end_request_all(rq, -EIO);
1909 } else {
1910 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
1911 break;
1912 }
1913 }
1914
1915 return rq;
1916 }
1917 EXPORT_SYMBOL(blk_peek_request);
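/*
 * Hedged usage sketch (not part of this file): one way a low-level driver's
 * request_fn might use blk_peek_request() so a request can be left queued
 * while the hardware is busy. 'my_hw_busy()' and 'my_dispatch()' are
 * hypothetical; blk_peek_request()/blk_start_request() are the real
 * interfaces, and queue_lock is held when ->request_fn runs.
 */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (my_hw_busy(q->queuedata))	/* hypothetical busy check */
			break;			/* leave rq queued for later */

		blk_start_request(rq);		/* dequeue, arm the timeout */
		my_dispatch(q->queuedata, rq);	/* hypothetical issue to hw */
	}
}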
1918
1919 void blk_dequeue_request(struct request *rq)
1920 {
1921 struct request_queue *q = rq->q;
1922
1923 BUG_ON(list_empty(&rq->queuelist));
1924 BUG_ON(ELV_ON_HASH(rq));
1925
1926 list_del_init(&rq->queuelist);
1927
1928 /*
1929 * The time frame between a request being removed from the lists
1930 * and when it is freed is accounted as I/O in progress on
1931 * the driver side.
1932 */
1933 if (blk_account_rq(rq)) {
1934 q->in_flight[rq_is_sync(rq)]++;
1935 set_io_start_time_ns(rq);
1936 }
1937 }
1938
1939 /**
1940 * blk_start_request - start request processing on the driver
1941 * @req: request to dequeue
1942 *
1943 * Description:
1944 * Dequeue @req and start timeout timer on it. This hands off the
1945 * request to the driver.
1946 *
1947 * Block internal functions which don't want to start the timer should
1948 * call blk_dequeue_request().
1949 *
1950 * Context:
1951 * queue_lock must be held.
1952 */
1953 void blk_start_request(struct request *req)
1954 {
1955 blk_dequeue_request(req);
1956
1957 /*
1958 * We are now handing the request to the hardware, initialize
1959 * resid_len to full count and add the timeout handler.
1960 */
1961 req->resid_len = blk_rq_bytes(req);
1962 if (unlikely(blk_bidi_rq(req)))
1963 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
1964
1965 blk_add_timer(req);
1966 }
1967 EXPORT_SYMBOL(blk_start_request);
1968
1969 /**
1970 * blk_fetch_request - fetch a request from a request queue
1971 * @q: request queue to fetch a request from
1972 *
1973 * Description:
1974 * Return the request at the top of @q. The request is started on
1975 * return and LLD can start processing it immediately.
1976 *
1977 * Return:
1978 * Pointer to the request at the top of @q if available, %NULL
1979 * otherwise.
1980 *
1981 * Context:
1982 * queue_lock must be held.
1983 */
1984 struct request *blk_fetch_request(struct request_queue *q)
1985 {
1986 struct request *rq;
1987
1988 rq = blk_peek_request(q);
1989 if (rq)
1990 blk_start_request(rq);
1991 return rq;
1992 }
1993 EXPORT_SYMBOL(blk_fetch_request);
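/*
 * Hedged usage sketch (not part of this file): the common pattern for a
 * simple driver that can finish requests synchronously, combining
 * blk_fetch_request() with __blk_end_request_all(). 'my_do_transfer()' is
 * hypothetical; queue_lock is held here, so the __blk_* variant is used.
 */
static void my_simple_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		int error = my_do_transfer(q->queuedata, rq);

		__blk_end_request_all(rq, error);
	}
}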
1994
1995 /**
1996 * blk_update_request - Special helper function for request stacking drivers
1997 * @req: the request being processed
1998 * @error: %0 for success, < %0 for error
1999 * @nr_bytes: number of bytes to complete @req
2000 *
2001 * Description:
2002 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2003 * the request structure even if @req has no data left.
2004 * If @req has leftover data, sets it up for the next range of segments.
2005 *
2006 * This special helper function is only for request stacking drivers
2007 * (e.g. request-based dm) so that they can handle partial completion.
2008 * Actual device drivers should use blk_end_request instead.
2009 *
2010 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2011 * %false return from this function.
2012 *
2013 * Return:
2014 * %false - this request doesn't have any more data
2015 * %true - this request has more data
2016 **/
2017 bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2018 {
2019 int total_bytes, bio_nbytes, next_idx = 0;
2020 struct bio *bio;
2021
2022 if (!req->bio)
2023 return false;
2024
2025 trace_block_rq_complete(req->q, req);
2026
2027 /*
2028 * For fs requests, rq is just a carrier of independent bios
2029 * and each partial completion should be handled separately.
2030 * Reset per-request error on each partial completion.
2031 *
2032 * TODO: tj: This is too subtle. It would be better to let
2033 * low level drivers do what they see fit.
2034 */
2035 if (req->cmd_type == REQ_TYPE_FS)
2036 req->errors = 0;
2037
2038 if (error && req->cmd_type == REQ_TYPE_FS &&
2039 !(req->cmd_flags & REQ_QUIET)) {
2040 char *error_type;
2041
2042 switch (error) {
2043 case -ENOLINK:
2044 error_type = "recoverable transport";
2045 break;
2046 case -EREMOTEIO:
2047 error_type = "critical target";
2048 break;
2049 case -EBADE:
2050 error_type = "critical nexus";
2051 break;
2052 case -EIO:
2053 default:
2054 error_type = "I/O";
2055 break;
2056 }
2057 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2058 error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
2059 (unsigned long long)blk_rq_pos(req));
2060 }
2061
2062 blk_account_io_completion(req, nr_bytes);
2063
2064 total_bytes = bio_nbytes = 0;
2065 while ((bio = req->bio) != NULL) {
2066 int nbytes;
2067
2068 if (nr_bytes >= bio->bi_size) {
2069 req->bio = bio->bi_next;
2070 nbytes = bio->bi_size;
2071 req_bio_endio(req, bio, nbytes, error);
2072 next_idx = 0;
2073 bio_nbytes = 0;
2074 } else {
2075 int idx = bio->bi_idx + next_idx;
2076
2077 if (unlikely(idx >= bio->bi_vcnt)) {
2078 blk_dump_rq_flags(req, "__end_that");
2079 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2080 __func__, idx, bio->bi_vcnt);
2081 break;
2082 }
2083
2084 nbytes = bio_iovec_idx(bio, idx)->bv_len;
2085 BIO_BUG_ON(nbytes > bio->bi_size);
2086
2087 /*
2088 * only part of this bvec was completed
2089 */
2090 if (unlikely(nbytes > nr_bytes)) {
2091 bio_nbytes += nr_bytes;
2092 total_bytes += nr_bytes;
2093 break;
2094 }
2095
2096 /*
2097 * advance to the next vector
2098 */
2099 next_idx++;
2100 bio_nbytes += nbytes;
2101 }
2102
2103 total_bytes += nbytes;
2104 nr_bytes -= nbytes;
2105
2106 bio = req->bio;
2107 if (bio) {
2108 /*
2109 * either end more bios in this run, or return 'not done'
2110 */
2111 if (unlikely(nr_bytes <= 0))
2112 break;
2113 }
2114 }
2115
2116 /*
2117 * completely done
2118 */
2119 if (!req->bio) {
2120 /*
2121 * Reset counters so that the request stacking driver
2122 * can find how many bytes remain in the request
2123 * later.
2124 */
2125 req->__data_len = 0;
2126 return false;
2127 }
2128
2129 /*
2130 * if the request wasn't completed, update state
2131 */
2132 if (bio_nbytes) {
2133 req_bio_endio(req, bio, bio_nbytes, error);
2134 bio->bi_idx += next_idx;
2135 bio_iovec(bio)->bv_offset += nr_bytes;
2136 bio_iovec(bio)->bv_len -= nr_bytes;
2137 }
2138
2139 req->__data_len -= total_bytes;
2140 req->buffer = bio_data(req->bio);
2141
2142 /* update sector only for requests with clear definition of sector */
2143 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
2144 req->__sector += total_bytes >> 9;
2145
2146 /* mixed attributes always follow the first bio */
2147 if (req->cmd_flags & REQ_MIXED_MERGE) {
2148 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2149 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2150 }
2151
2152 /*
2153 * If total number of sectors is less than the first segment
2154 * size, something has gone terribly wrong.
2155 */
2156 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2157 blk_dump_rq_flags(req, "request botched");
2158 req->__data_len = blk_rq_cur_bytes(req);
2159 }
2160
2161 /* recalculate the number of segments */
2162 blk_recalc_rq_segments(req);
2163
2164 return true;
2165 }
2166 EXPORT_SYMBOL_GPL(blk_update_request);
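/*
 * Hedged usage sketch (not part of this file): how a request stacking
 * driver might feed a partial completion of a clone back into the original
 * request. 'orig' and 'nr_bytes' come from the hypothetical stacking
 * driver's own bookkeeping; blk_update_request() is the real interface.
 */
static void my_stacked_bytes_done(struct request *orig, int error,
				  unsigned int nr_bytes)
{
	if (blk_update_request(orig, error, nr_bytes))
		return;		/* more data pending on the original */

	/* the original has no data left; finish it via the blk_end_* path */
}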
2167
2168 static bool blk_update_bidi_request(struct request *rq, int error,
2169 unsigned int nr_bytes,
2170 unsigned int bidi_bytes)
2171 {
2172 if (blk_update_request(rq, error, nr_bytes))
2173 return true;
2174
2175 /* Bidi request must be completed as a whole */
2176 if (unlikely(blk_bidi_rq(rq)) &&
2177 blk_update_request(rq->next_rq, error, bidi_bytes))
2178 return true;
2179
2180 if (blk_queue_add_random(rq->q))
2181 add_disk_randomness(rq->rq_disk);
2182
2183 return false;
2184 }
2185
2186 /**
2187 * blk_unprep_request - unprepare a request
2188 * @req: the request
2189 *
2190 * This function makes a request ready for complete resubmission (or
2191 * completion). It happens only after all error handling is complete,
2192 * so represents the appropriate moment to deallocate any resources
2193 * that were allocated to the request in the prep_rq_fn. The queue
2194 * lock is held when calling this.
2195 */
2196 void blk_unprep_request(struct request *req)
2197 {
2198 struct request_queue *q = req->q;
2199
2200 req->cmd_flags &= ~REQ_DONTPREP;
2201 if (q->unprep_rq_fn)
2202 q->unprep_rq_fn(q, req);
2203 }
2204 EXPORT_SYMBOL_GPL(blk_unprep_request);
2205
2206 /*
2207 * queue lock must be held
2208 */
2209 static void blk_finish_request(struct request *req, int error)
2210 {
2211 if (blk_rq_tagged(req))
2212 blk_queue_end_tag(req->q, req);
2213
2214 BUG_ON(blk_queued_rq(req));
2215
2216 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2217 laptop_io_completion(&req->q->backing_dev_info);
2218
2219 blk_delete_timer(req);
2220
2221 if (req->cmd_flags & REQ_DONTPREP)
2222 blk_unprep_request(req);
2223
2224
2225 blk_account_io_done(req);
2226
2227 if (req->end_io)
2228 req->end_io(req, error);
2229 else {
2230 if (blk_bidi_rq(req))
2231 __blk_put_request(req->next_rq->q, req->next_rq);
2232
2233 __blk_put_request(req->q, req);
2234 }
2235 }
2236
2237 /**
2238 * blk_end_bidi_request - Complete a bidi request
2239 * @rq: the request to complete
2240 * @error: %0 for success, < %0 for error
2241 * @nr_bytes: number of bytes to complete @rq
2242 * @bidi_bytes: number of bytes to complete @rq->next_rq
2243 *
2244 * Description:
2245 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2246 * Drivers that support bidi can safely call this function for any
2247 * type of request, bidi or uni. In the latter case @bidi_bytes is
2248 * just ignored.
2249 *
2250 * Return:
2251 * %false - we are done with this request
2252 * %true - still buffers pending for this request
2253 **/
2254 static bool blk_end_bidi_request(struct request *rq, int error,
2255 unsigned int nr_bytes, unsigned int bidi_bytes)
2256 {
2257 struct request_queue *q = rq->q;
2258 unsigned long flags;
2259
2260 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2261 return true;
2262
2263 spin_lock_irqsave(q->queue_lock, flags);
2264 blk_finish_request(rq, error);
2265 spin_unlock_irqrestore(q->queue_lock, flags);
2266
2267 return false;
2268 }
2269
2270 /**
2271 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2272 * @rq: the request to complete
2273 * @error: %0 for success, < %0 for error
2274 * @nr_bytes: number of bytes to complete @rq
2275 * @bidi_bytes: number of bytes to complete @rq->next_rq
2276 *
2277 * Description:
2278 * Identical to blk_end_bidi_request() except that queue lock is
2279 * assumed to be locked on entry and remains so on return.
2280 *
2281 * Return:
2282 * %false - we are done with this request
2283 * %true - still buffers pending for this request
2284 **/
2285 bool __blk_end_bidi_request(struct request *rq, int error,
2286 unsigned int nr_bytes, unsigned int bidi_bytes)
2287 {
2288 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2289 return true;
2290
2291 blk_finish_request(rq, error);
2292
2293 return false;
2294 }
2295
2296 /**
2297 * blk_end_request - Helper function for drivers to complete the request.
2298 * @rq: the request being processed
2299 * @error: %0 for success, < %0 for error
2300 * @nr_bytes: number of bytes to complete
2301 *
2302 * Description:
2303 * Ends I/O on a number of bytes attached to @rq.
2304 * If @rq has leftover, sets it up for the next range of segments.
2305 *
2306 * Return:
2307 * %false - we are done with this request
2308 * %true - still buffers pending for this request
2309 **/
2310 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2311 {
2312 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2313 }
2314 EXPORT_SYMBOL(blk_end_request);
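/*
 * Hedged usage sketch (not part of this file): a driver completion path
 * (e.g. run from its interrupt handler, without queue_lock held) retiring
 * 'bytes' of the current request with blk_end_request(). What happens when
 * more segments remain is driver-specific and only commented here.
 */
static void my_transfer_done(struct request *rq, int error, unsigned int bytes)
{
	if (blk_end_request(rq, error, bytes)) {
		/* more segments remain in rq: issue the next chunk here */
	}
	/* otherwise rq has been fully completed and freed */
}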
2315
2316 /**
2317 * blk_end_request_all - Helper function for drivers to finish the request.
2318 * @rq: the request to finish
2319 * @error: %0 for success, < %0 for error
2320 *
2321 * Description:
2322 * Completely finish @rq.
2323 */
2324 void blk_end_request_all(struct request *rq, int error)
2325 {
2326 bool pending;
2327 unsigned int bidi_bytes = 0;
2328
2329 if (unlikely(blk_bidi_rq(rq)))
2330 bidi_bytes = blk_rq_bytes(rq->next_rq);
2331
2332 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2333 BUG_ON(pending);
2334 }
2335 EXPORT_SYMBOL(blk_end_request_all);
2336
2337 /**
2338 * blk_end_request_cur - Helper function to finish the current request chunk.
2339 * @rq: the request to finish the current chunk for
2340 * @error: %0 for success, < %0 for error
2341 *
2342 * Description:
2343 * Complete the current consecutively mapped chunk from @rq.
2344 *
2345 * Return:
2346 * %false - we are done with this request
2347 * %true - still buffers pending for this request
2348 */
2349 bool blk_end_request_cur(struct request *rq, int error)
2350 {
2351 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2352 }
2353 EXPORT_SYMBOL(blk_end_request_cur);
2354
2355 /**
2356 * blk_end_request_err - Finish a request till the next failure boundary.
2357 * @rq: the request to finish till the next failure boundary for
2358 * @error: must be negative errno
2359 *
2360 * Description:
2361 * Complete @rq till the next failure boundary.
2362 *
2363 * Return:
2364 * %false - we are done with this request
2365 * %true - still buffers pending for this request
2366 */
2367 bool blk_end_request_err(struct request *rq, int error)
2368 {
2369 WARN_ON(error >= 0);
2370 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2371 }
2372 EXPORT_SYMBOL_GPL(blk_end_request_err);
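/*
 * Hedged usage sketch (not part of this file): on a fatal error a driver
 * can fail only the portion of a mixed-merge request that shares the first
 * bio's failfast policy (see blk_rq_err_bytes() above) and keep the rest
 * pending for retry. The retry/requeue step is driver-specific and only
 * indicated by a comment.
 */
static void my_fail_to_boundary(struct request *rq, int error)
{
	if (blk_end_request_err(rq, error)) {
		/*
		 * the less failure-eager remainder of rq is still pending;
		 * the driver would typically requeue or retry it here
		 */
	}
}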
2373
2374 /**
2375 * __blk_end_request - Helper function for drivers to complete the request.
2376 * @rq: the request being processed
2377 * @error: %0 for success, < %0 for error
2378 * @nr_bytes: number of bytes to complete
2379 *
2380 * Description:
2381 * Must be called with queue lock held unlike blk_end_request().
2382 *
2383 * Return:
2384 * %false - we are done with this request
2385 * %true - still buffers pending for this request
2386 **/
2387 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2388 {
2389 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2390 }
2391 EXPORT_SYMBOL(__blk_end_request);
2392
2393 /**
2394 * __blk_end_request_all - Helper function for drivers to finish the request.
2395 * @rq: the request to finish
2396 * @error: %0 for success, < %0 for error
2397 *
2398 * Description:
2399 * Completely finish @rq. Must be called with queue lock held.
2400 */
2401 void __blk_end_request_all(struct request *rq, int error)
2402 {
2403 bool pending;
2404 unsigned int bidi_bytes = 0;
2405
2406 if (unlikely(blk_bidi_rq(rq)))
2407 bidi_bytes = blk_rq_bytes(rq->next_rq);
2408
2409 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2410 BUG_ON(pending);
2411 }
2412 EXPORT_SYMBOL(__blk_end_request_all);
2413
2414 /**
2415 * __blk_end_request_cur - Helper function to finish the current request chunk.
2416 * @rq: the request to finish the current chunk for
2417 * @error: %0 for success, < %0 for error
2418 *
2419 * Description:
2420 * Complete the current consecutively mapped chunk from @rq. Must
2421 * be called with queue lock held.
2422 *
2423 * Return:
2424 * %false - we are done with this request
2425 * %true - still buffers pending for this request
2426 */
2427 bool __blk_end_request_cur(struct request *rq, int error)
2428 {
2429 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2430 }
2431 EXPORT_SYMBOL(__blk_end_request_cur);
2432
2433 /**
2434 * __blk_end_request_err - Finish a request till the next failure boundary.
2435 * @rq: the request to finish till the next failure boundary for
2436 * @error: must be negative errno
2437 *
2438 * Description:
2439 * Complete @rq till the next failure boundary. Must be called
2440 * with queue lock held.
2441 *
2442 * Return:
2443 * %false - we are done with this request
2444 * %true - still buffers pending for this request
2445 */
2446 bool __blk_end_request_err(struct request *rq, int error)
2447 {
2448 WARN_ON(error >= 0);
2449 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2450 }
2451 EXPORT_SYMBOL_GPL(__blk_end_request_err);
2452
2453 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2454 struct bio *bio)
2455 {
2456 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2457 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2458
2459 if (bio_has_data(bio)) {
2460 rq->nr_phys_segments = bio_phys_segments(q, bio);
2461 rq->buffer = bio_data(bio);
2462 }
2463 rq->__data_len = bio->bi_size;
2464 rq->bio = rq->biotail = bio;
2465
2466 if (bio->bi_bdev)
2467 rq->rq_disk = bio->bi_bdev->bd_disk;
2468 }
2469
2470 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2471 /**
2472 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2473 * @rq: the request to be flushed
2474 *
2475 * Description:
2476 * Flush all pages in @rq.
2477 */
2478 void rq_flush_dcache_pages(struct request *rq)
2479 {
2480 struct req_iterator iter;
2481 struct bio_vec *bvec;
2482
2483 rq_for_each_segment(bvec, rq, iter)
2484 flush_dcache_page(bvec->bv_page);
2485 }
2486 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2487 #endif
2488
2489 /**
2490 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2491 * @q : the queue of the device being checked
2492 *
2493 * Description:
2494 * Check if underlying low-level drivers of a device are busy.
2495 * If the drivers want to export their busy state, they must set their own
2496 * exporting function using blk_queue_lld_busy() first.
2497 *
2498 * Basically, this function is used only by request stacking drivers
2499 * to stop dispatching requests to underlying devices when underlying
2500 * devices are busy. This behavior allows more I/O merging on the queue
2501 * of the request stacking driver and prevents I/O throughput regression
2502 * under bursty I/O load.
2503 *
2504 * Return:
2505 * 0 - Not busy (The request stacking driver should dispatch request)
2506 * 1 - Busy (The request stacking driver should stop dispatching request)
2507 */
2508 int blk_lld_busy(struct request_queue *q)
2509 {
2510 if (q->lld_busy_fn)
2511 return q->lld_busy_fn(q);
2512
2513 return 0;
2514 }
2515 EXPORT_SYMBOL_GPL(blk_lld_busy);
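/*
 * Hedged usage sketch (not part of this file): a request stacking driver
 * holding back dispatch while the underlying device is busy. 'bdev' would
 * be the stacking driver's chosen lower device; bdev_get_queue() and
 * blk_lld_busy() are the real interfaces.
 */
static bool my_lower_dev_busy(struct block_device *bdev)
{
	return blk_lld_busy(bdev_get_queue(bdev)) != 0;
}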
2516
2517 /**
2518 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2519 * @rq: the clone request to be cleaned up
2520 *
2521 * Description:
2522 * Free all bios in @rq for a cloned request.
2523 */
2524 void blk_rq_unprep_clone(struct request *rq)
2525 {
2526 struct bio *bio;
2527
2528 while ((bio = rq->bio) != NULL) {
2529 rq->bio = bio->bi_next;
2530
2531 bio_put(bio);
2532 }
2533 }
2534 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2535
2536 /*
2537 * Copy attributes of the original request to the clone request.
2538 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2539 */
2540 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2541 {
2542 dst->cpu = src->cpu;
2543 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2544 dst->cmd_type = src->cmd_type;
2545 dst->__sector = blk_rq_pos(src);
2546 dst->__data_len = blk_rq_bytes(src);
2547 dst->nr_phys_segments = src->nr_phys_segments;
2548 dst->ioprio = src->ioprio;
2549 dst->extra_len = src->extra_len;
2550 }
2551
2552 /**
2553 * blk_rq_prep_clone - Helper function to setup clone request
2554 * @rq: the request to be setup
2555 * @rq_src: original request to be cloned
2556 * @bs: bio_set that bios for clone are allocated from
2557 * @gfp_mask: memory allocation mask for bio
2558 * @bio_ctr: setup function to be called for each clone bio.
2559 * Returns %0 for success, non %0 for failure.
2560 * @data: private data to be passed to @bio_ctr
2561 *
2562 * Description:
2563 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2564 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2565 * are not copied, and copying such parts is the caller's responsibility.
2566 * Also, pages which the original bios are pointing to are not copied
2567 * and the cloned bios just point to the same pages.
2568 * So cloned bios must be completed before original bios, which means
2569 * the caller must complete @rq before @rq_src.
2570 */
2571 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2572 struct bio_set *bs, gfp_t gfp_mask,
2573 int (*bio_ctr)(struct bio *, struct bio *, void *),
2574 void *data)
2575 {
2576 struct bio *bio, *bio_src;
2577
2578 if (!bs)
2579 bs = fs_bio_set;
2580
2581 blk_rq_init(NULL, rq);
2582
2583 __rq_for_each_bio(bio_src, rq_src) {
2584 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2585 if (!bio)
2586 goto free_and_out;
2587
2588 __bio_clone(bio, bio_src);
2589
2590 if (bio_integrity(bio_src) &&
2591 bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2592 goto free_and_out;
2593
2594 if (bio_ctr && bio_ctr(bio, bio_src, data))
2595 goto free_and_out;
2596
2597 if (rq->bio) {
2598 rq->biotail->bi_next = bio;
2599 rq->biotail = bio;
2600 } else
2601 rq->bio = rq->biotail = bio;
2602 }
2603
2604 __blk_rq_prep_clone(rq, rq_src);
2605
2606 return 0;
2607
2608 free_and_out:
2609 if (bio)
2610 bio_free(bio, bs);
2611 blk_rq_unprep_clone(rq);
2612
2613 return -ENOMEM;
2614 }
2615 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
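/*
 * Hedged usage sketch (not part of this file): setting up a clone the way a
 * request stacking driver might before dispatching it to a lower device.
 * 'my_bio_ctr', 'my_clone_end_io' and 'priv' are hypothetical; passing a
 * NULL bio_set falls back to fs_bio_set as described above.
 */
static int my_setup_clone(struct request *clone, struct request *rq_src,
			  void *priv)
{
	if (blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC,
			      my_bio_ctr, priv))
		return -ENOMEM;

	clone->end_io = my_clone_end_io;	/* hypothetical completion hook */
	clone->end_io_data = priv;
	return 0;
}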
2616
2617 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2618 {
2619 return queue_work(kblockd_workqueue, work);
2620 }
2621 EXPORT_SYMBOL(kblockd_schedule_work);
2622
2623 int kblockd_schedule_delayed_work(struct request_queue *q,
2624 struct delayed_work *dwork, unsigned long delay)
2625 {
2626 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2627 }
2628 EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2629
2630 #define PLUG_MAGIC 0x91827364
2631
2632 void blk_start_plug(struct blk_plug *plug)
2633 {
2634 struct task_struct *tsk = current;
2635
2636 plug->magic = PLUG_MAGIC;
2637 INIT_LIST_HEAD(&plug->list);
2638 INIT_LIST_HEAD(&plug->cb_list);
2639 plug->should_sort = 0;
2640
2641 /*
2642 * If this is a nested plug, don't actually assign it. It will be
2643 * flushed on its own.
2644 */
2645 if (!tsk->plug) {
2646 /*
2647 * Store ordering should not be needed here, since a potential
2648 * preempt will imply a full memory barrier
2649 */
2650 tsk->plug = plug;
2651 }
2652 }
2653 EXPORT_SYMBOL(blk_start_plug);
2654
2655 static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2656 {
2657 struct request *rqa = container_of(a, struct request, queuelist);
2658 struct request *rqb = container_of(b, struct request, queuelist);
2659
2660 return !(rqa->q <= rqb->q);
2661 }
2662
2663 /*
2664 * If 'from_schedule' is true, then postpone the dispatch of requests
2665 * until a safe kblockd context. We do this to avoid accidentally large
2666 * additional stack usage in driver dispatch, in places where the original
2667 * plugger did not intend it.
2668 */
2669 static void queue_unplugged(struct request_queue *q, unsigned int depth,
2670 bool from_schedule)
2671 __releases(q->queue_lock)
2672 {
2673 trace_block_unplug(q, depth, !from_schedule);
2674
2675 /*
2676 * If we are punting this to kblockd, then we can safely drop
2677 * the queue_lock before waking kblockd (which needs to take
2678 * this lock).
2679 */
2680 if (from_schedule) {
2681 spin_unlock(q->queue_lock);
2682 blk_run_queue_async(q);
2683 } else {
2684 __blk_run_queue(q);
2685 spin_unlock(q->queue_lock);
2686 }
2687
2688 }
2689
2690 static void flush_plug_callbacks(struct blk_plug *plug)
2691 {
2692 LIST_HEAD(callbacks);
2693
2694 if (list_empty(&plug->cb_list))
2695 return;
2696
2697 list_splice_init(&plug->cb_list, &callbacks);
2698
2699 while (!list_empty(&callbacks)) {
2700 struct blk_plug_cb *cb = list_first_entry(&callbacks,
2701 struct blk_plug_cb,
2702 list);
2703 list_del(&cb->list);
2704 cb->callback(cb);
2705 }
2706 }
2707
2708 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2709 {
2710 struct request_queue *q;
2711 unsigned long flags;
2712 struct request *rq;
2713 LIST_HEAD(list);
2714 unsigned int depth;
2715
2716 BUG_ON(plug->magic != PLUG_MAGIC);
2717
2718 flush_plug_callbacks(plug);
2719 if (list_empty(&plug->list))
2720 return;
2721
2722 list_splice_init(&plug->list, &list);
2723
2724 if (plug->should_sort) {
2725 list_sort(NULL, &list, plug_rq_cmp);
2726 plug->should_sort = 0;
2727 }
2728
2729 q = NULL;
2730 depth = 0;
2731
2732 /*
2733 * Save and disable interrupts here, to avoid doing it for every
2734 * queue lock we have to take.
2735 */
2736 local_irq_save(flags);
2737 while (!list_empty(&list)) {
2738 rq = list_entry_rq(list.next);
2739 list_del_init(&rq->queuelist);
2740 BUG_ON(!rq->q);
2741 if (rq->q != q) {
2742 /*
2743 * This drops the queue lock
2744 */
2745 if (q)
2746 queue_unplugged(q, depth, from_schedule);
2747 q = rq->q;
2748 depth = 0;
2749 spin_lock(q->queue_lock);
2750 }
2751 /*
2752 * rq is already accounted, so use raw insert
2753 */
2754 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
2755 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
2756 else
2757 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
2758
2759 depth++;
2760 }
2761
2762 /*
2763 * This drops the queue lock
2764 */
2765 if (q)
2766 queue_unplugged(q, depth, from_schedule);
2767
2768 local_irq_restore(flags);
2769 }
2770
2771 void blk_finish_plug(struct blk_plug *plug)
2772 {
2773 blk_flush_plug_list(plug, false);
2774
2775 if (plug == current->plug)
2776 current->plug = NULL;
2777 }
2778 EXPORT_SYMBOL(blk_finish_plug);
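/*
 * Hedged usage sketch (not part of this file): how a submitter batches I/O
 * under an on-stack plug so that requests are held back and merged until
 * blk_finish_plug() (or a schedule) flushes them. 'my_submit_one_bio()' is
 * hypothetical; blk_start_plug()/blk_finish_plug() are the real interfaces.
 */
static void my_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		my_submit_one_bio(bios[i]);	/* e.g. via submit_bio() */
	blk_finish_plug(&plug);
}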
2779
2780 int __init blk_dev_init(void)
2781 {
2782 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2783 sizeof(((struct request *)0)->cmd_flags));
2784
2785 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
2786 kblockd_workqueue = alloc_workqueue("kblockd",
2787 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2788 if (!kblockd_workqueue)
2789 panic("Failed to create kblockd\n");
2790
2791 request_cachep = kmem_cache_create("blkdev_requests",
2792 sizeof(struct request), 0, SLAB_PANIC, NULL);
2793
2794 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2795 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2796
2797 return 0;
2798 }