/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

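/*
 * Advance the request's hard_sector/hard_nr_sectors by @nsect completed
 * sectors and, if the soft submission pointers have fallen behind, resync
 * sector, nr_sectors, current_nr_sectors and buffer from the current bio.
 * Only file system requests are touched.
 */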
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}

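/*
 * Walk every bio_vec in the request and recompute how many physical and
 * hardware segments it spans, honouring the queue's clustering, segment
 * size and boundary limits. The totals are stored in rq->nr_phys_segments
 * and rq->nr_hw_segments, and the first/last bios have their front/back
 * hw segment sizes updated for later merge decisions.
 */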
void blk_recalc_rq_segments(struct request *rq)
{
	int nr_phys_segs;
	int nr_hw_segs;
	unsigned int phys_size;
	unsigned int hw_size;
	struct bio_vec *bv, *bvprv = NULL;
	int seg_size;
	int hw_seg_size;
	int cluster;
	struct req_iterator iter;
	int high, highprv = 1;
	struct request_queue *q = rq->q;

	if (!rq->bio)
		return;

	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	hw_seg_size = seg_size = 0;
	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
	rq_for_each_segment(bv, rq, iter) {
		/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
		 */
		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
		if (high || highprv)
			goto new_hw_segment;
		if (cluster) {
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;
			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
				goto new_hw_segment;

			seg_size += bv->bv_len;
			hw_seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
			hw_seg_size += bv->bv_len;
		else {
new_hw_segment:
			if (nr_hw_segs == 1 &&
			    hw_seg_size > rq->bio->bi_hw_front_size)
				rq->bio->bi_hw_front_size = hw_seg_size;
			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
			nr_hw_segs++;
		}

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
		highprv = high;
	}

	if (nr_hw_segs == 1 &&
	    hw_seg_size > rq->bio->bi_hw_front_size)
		rq->bio->bi_hw_front_size = hw_seg_size;
	if (hw_seg_size > rq->biotail->bi_hw_back_size)
		rq->biotail->bi_hw_back_size = hw_seg_size;
	rq->nr_phys_segments = nr_phys_segs;
	rq->nr_hw_segments = nr_hw_segs;
}

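/*
 * Recount the segments of a single bio by wrapping it in a temporary
 * on-stack request and reusing blk_recalc_rq_segments(). The results are
 * cached in bi_phys_segments/bi_hw_segments and the bio is marked
 * BIO_SEG_VALID.
 */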
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct request rq;
	struct bio *nxt = bio->bi_next;
	rq.q = q;
	rq.bio = rq.biotail = bio;
	bio->bi_next = NULL;
	blk_recalc_rq_segments(&rq);
	bio->bi_next = nxt;
	bio->bi_phys_segments = rq.nr_phys_segments;
	bio->bi_hw_segments = rq.nr_hw_segments;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

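/*
 * Return 1 if @bio and @nxt may be folded into one physical segment: the
 * queue must cluster, their ends must be physically mergeable, and the
 * combined size must respect the queue's segment size and boundary limits.
 */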
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

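/*
 * Return 1 if @bio and @nxt can form a single hardware segment: their ends
 * must be virtually mergeable and the combined front/back sizes must stay
 * within the virtual merge and queue segment size limits.
 */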
static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
				 struct bio *nxt)
{
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(nxt, BIO_SEG_VALID))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
		return 0;
	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
		return 0;

	return 1;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
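/*
 * A typical caller (illustrative sketch only, not part of this file) sizes
 * the scatterlist from rq->nr_phys_segments, maps the request and then
 * hands the result to the DMA API:
 *
 *	nents = blk_rq_map_sg(q, rq, sglist);
 *	nents = dma_map_sg(dev, sglist, nents, dir);
 *
 * where sglist, dev and dir are the driver's own scatterlist, device and
 * DMA direction.
 */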
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > q->max_segment_size)
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (rq->data_len & q->dma_pad_mask)) {
		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

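/*
 * The bio would extend the request's last hardware segment, so only the
 * physical segment limit needs checking. On failure the request is marked
 * REQ_NOMERGE and dropped as the queue's last merge candidate.
 */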
static inline int ll_new_mergeable(struct request_queue *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

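/*
 * The bio would start a new hardware segment, so both the hardware and
 * physical segment limits are checked before accounting it to the request.
 */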
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

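/*
 * Decide whether @bio may be appended to the back of @req. The sector and
 * segment limits are checked, and if the tail of the request and the head
 * of the bio are virtually mergeable the existing hardware segment is
 * grown instead of starting a new one.
 */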
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
	    && !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (req->nr_hw_segments == 1)
				req->bio->bi_hw_front_size = len;
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

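/*
 * Decide whether @bio may be prepended to the front of @req, mirroring the
 * checks done by ll_back_merge_fn() for the opposite end of the request.
 */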
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_front_size = len;
			if (req->nr_hw_segments == 1)
				req->biotail->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

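/*
 * Check whether @req and @next may be merged without exceeding the queue's
 * sector and segment limits, collapsing segments that become contiguous
 * across the boundary. Returns 1 and updates the segment counts of @req on
 * success, 0 otherwise.
 */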
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	int total_hw_segments;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio))
		total_phys_segments--;

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
		int len = req->biotail->bi_hw_back_size +
				next->bio->bi_hw_front_size;
		/*
		 * propagate the combined length to the outer ends of the
		 * merged request
		 */
		if (req->nr_hw_segments == 1)
			req->bio->bi_hw_front_size = len;
		if (next->nr_hw_segments == 1)
			next->biotail->bi_hw_back_size = len;
		total_hw_segments--;
	}

	if (total_hw_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_hw_segments = total_hw_segments;
	return 1;
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to req and release next. ll_merge_requests_fn will have updated
	 * the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * At this point we have either done a back merge or a front merge.
	 * For accounting purposes, the merged request should carry the
	 * smaller start_time of the two requests.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	if (req->rq_disk) {
		struct hd_struct *part
			= get_part(req->rq_disk, req->sector);
		disk_round_stats(req->rq_disk);
		req->rq_disk->in_flight--;
		if (part) {
			part_round_stats(part);
			part->in_flight--;
		}
	}

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);

	__blk_put_request(q, next);
	return 1;
}

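/*
 * Try to merge @rq with the request the elevator places directly after it.
 */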
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

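/*
 * Try to merge @rq with the request the elevator places directly before it.
 */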
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}