/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

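/*
 * Walk the bio(s) starting at @bio and count the number of physical
 * segments, folding adjacent biovecs into one segment where the queue's
 * cluster flag, max segment size and segment boundary allow it.  The size
 * of the first and last segment is recorded in bi_seg_front_size /
 * bi_seg_back_size so that later request merging can re-check the segment
 * size limit across the merge point.
 */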
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;

        if (!bio)
                return 0;

        fbio = bio;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        seg_size = 0;
        phys_size = nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since
                         * that might change with the bounce page.
                         */
                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
                                if (seg_size + bv->bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                        goto new_segment;

                                seg_size += bv->bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

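/*
 * Recompute the physical segment count across all bios in @rq and store
 * it in rq->nr_phys_segments (typically needed after the request's bio
 * list has changed, e.g. on a partial completion).
 */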
void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

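/*
 * Recount the physical segments of a single bio.  The chain is
 * temporarily broken so only @bio itself is counted, and the result is
 * marked valid via BIO_SEG_VALID.
 */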
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

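/*
 * Return 1 if the last segment of @bio and the first segment of @nxt are
 * physically contiguous and small enough to form one segment without
 * violating the queue's segment size and boundary limits, 0 otherwise.
 */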
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
                return 1;

        return 0;
}

/*
 * Map a request to a scatterlist, returning the number of sg entries set
 * up.  The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
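/*
 * Illustrative use in a driver's request handling path (a sketch, not
 * taken from any particular driver; NR_MAX_SEGMENTS, dev and dir are
 * hypothetical driver-side names):
 *
 *      struct scatterlist sgl[NR_MAX_SEGMENTS];
 *      int nents;
 *
 *      sg_init_table(sgl, NR_MAX_SEGMENTS);
 *      nents = blk_rq_map_sg(q, rq, sgl);
 *      nents = dma_map_sg(dev, sgl, nents, dir);
 *
 * The one hard requirement imposed here is that sgl can hold
 * rq->nr_phys_segments entries.
 */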
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;

        nsegs = 0;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

        /*
         * for each bio in rq
         */
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
                int nbytes = bvec->bv_len;

                if (bvprv && cluster) {
                        if (sg->length + nbytes > queue_max_segment_size(q))
                                goto new_segment;

                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                                goto new_segment;

                        sg->length += nbytes;
                } else {
new_segment:
                        if (!sg)
                                sg = sglist;
                        else {
                                /*
                                 * If the driver previously mapped a shorter
                                 * list, we could see a termination bit
                                 * prematurely unless it fully inits the sg
                                 * table on each mapping.  We KNOW that there
                                 * must be more entries here or the driver
                                 * would be buggy, so force clear the
                                 * termination bit to avoid doing a full
                                 * sg_init_table() in drivers for each command.
                                 */
                                sg->page_link &= ~0x02;
                                sg = sg_next(sg);
                        }

                        sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
                        nsegs++;
                }
                bvprv = bvec;
        } /* segments in rq */

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_RW)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

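/*
 * Account the physical segments @bio would add to @req.  If the queue's
 * segment limit would be exceeded, the merge fails and @req is marked
 * REQ_NOMERGE; otherwise req->nr_phys_segments is bumped.
 */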
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;
}

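/*
 * Check whether @bio can be appended to the back of @req without
 * exceeding the queue's sector and segment limits.
 */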
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        unsigned short max_sectors;

        if (unlikely(blk_pc_request(req)))
                max_sectors = queue_max_hw_sectors(q);
        else
                max_sectors = queue_max_sectors(q);

        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

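/*
 * Check whether @bio can be prepended to the front of @req without
 * exceeding the queue's sector and segment limits.
 */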
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        unsigned short max_sectors;

        if (unlikely(blk_pc_request(req)))
                max_sectors = queue_max_hw_sectors(q);
        else
                max_sectors = queue_max_sectors(q);

        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

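/*
 * Check whether @req and @next can be combined into one request without
 * exceeding the queue's sector and segment limits; on success the merged
 * segment count is written back to @req.
 */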
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests is a re-queued request;
         * they can't be merged if so.
         */
        if (req->special || next->special)
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

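/*
 * A merged request disappears from the queue, so drop its contribution
 * to the per-partition in-flight count and round off the stats.
 */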
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || next->special)
                return 0;

        if (blk_integrity_rq(req) != blk_integrity_rq(next))
                return 0;

        /*
         * If we are allowed to merge, then append the bio list from next
         * to rq and release next.  merge_requests_fn will have updated
         * the segment counts; update the sector counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or a front
         * merge.  We need the smaller start_time of the merged requests
         * to be the current request for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

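/*
 * Try to merge @rq with the request the elevator reports as following it.
 */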
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

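/*
 * Try to merge @rq into the request the elevator reports as preceding it.
 */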
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}