/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
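
/*
 * Split an oversized discard bio: cap it at the queue's max_discard_sectors
 * (rounded down to the discard granularity) and pull the split point back
 * so the remainder starts on a granularity-aligned sector.
 */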
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs)
{
	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
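
/*
 * Split a bio wherever the queue's sector, segment-count, segment-size, or
 * SG-gap limits would otherwise be exceeded; walks the biovecs and cuts at
 * the last boundary that still fits.
 */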
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;

	bio_for_each_segment(bv, bio, iter) {
		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bv;
			sectors += bv.bv_len >> 9;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;
	}

	return NULL;
split:
	return bio_split(bio, sectors, GFP_NOIO, bs);
}
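
/*
 * Entry point for bio splitting.  In this kernel the make_request paths
 * (e.g. blk_queue_bio() and the blk-mq submission path) call this before
 * processing a bio, so lower layers only see bios that fit the queue's
 * limits; any oversized remainder is chained and resubmitted.
 */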
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split;

	if ((*bio)->bi_rw & REQ_DISCARD)
		split = blk_bio_discard_split(q, *bio, bs);
	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
		split = blk_bio_write_same_split(q, *bio, bs);
	else
		split = blk_bio_segment_split(q, *bio, q->bio_split);

	if (split) {
		bio_chain(split, *bio);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);
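
/*
 * Walk the bio (or bio chain) of a request and count the physical segments
 * it maps to once adjacent biovecs that the hardware can merge are folded
 * together; also records the front/back segment sizes consulted by the
 * request-merge heuristics further down.
 */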
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * (Christoph!!!!)
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
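
/*
 * Recompute the physical segment count of a request, honouring the
 * QUEUE_FLAG_NO_SG_MERGE setting of its queue.
 */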
void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
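
/*
 * Return 1 if the tail of @bio and the head of @nxt are physically
 * contiguous and may share a single hardware segment without violating
 * the queue's segment size and boundary limits.
 */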
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}
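
/*
 * Map one biovec into the scatterlist, either by growing the current sg
 * entry (when the queue supports clustering and the limits allow it) or
 * by starting a new entry.
 */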
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}
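
/*
 * Map all biovecs of a bio chain into @sglist; discard and write-same
 * bios get at most a single payload segment, everything else is mapped
 * segment by segment through __blk_segment_map_sg().
 */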
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */
		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
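
/*
 * A minimal driver-side sketch of how blk_rq_map_sg() is typically used
 * (illustrative only; the fixed table size and variable names here are
 * hypothetical, real drivers size the table from their queue limits):
 *
 *	struct scatterlist sgl[128];	// hypothetical upper bound
 *	int nseg;
 *
 *	sg_init_table(sgl, rq->nr_phys_segments);
 *	nseg = blk_rq_map_sg(q, rq, sgl);
 *	// the first nseg entries of sgl[] are now ready for DMA mapping
 */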

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}
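
/*
 * Check whether @bio can be appended to the tail of @req (back merge).
 * ll_front_merge_fn() below is the mirror image for prepending at the
 * head of the request.
 */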
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * We can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distributes the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

->cmd_flags
& REQ_WRITE_SAME
&&
600 !blk_write_same_mergeable(req
->bio
, next
->bio
))
	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}
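
/*
 * Check whether @bio may be merged into @rq at all: same direction, same
 * disk, compatible flags, matching integrity profile, and (for write-same)
 * the same payload buffer.
 */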
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}