/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn, blk_max_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * CDB from the request data, for instance.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}

EXPORT_SYMBOL(blk_queue_prep_rq);
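
/*
 * Example (illustrative sketch, not an interface defined elsewhere in the
 * kernel): a driver that must build a command block before each request is
 * issued could register a prep function along these lines; my_prep_rq() and
 * my_build_command() are assumed names.
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		my_build_command(rq);
 *		return BLKPREP_OK;
 *	}
 *
 *	blk_queue_prep_rq(q, my_prep_rq);
 */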

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}

EXPORT_SYMBOL(blk_queue_merge_bvec);
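
/*
 * Example (hypothetical stacking driver): a driver whose per-device limits
 * change at run time could cap bio growth with a merge_bvec callback.
 * my_mergeable_bytes() is an assumed helper returning how many more bytes
 * fit at the given offset; the callback prototype is the one this kernel
 * version uses.
 *
 *	static int my_merge_bvec(struct request_queue *q, struct bio *bio,
 *				 struct bio_vec *bvec)
 *	{
 *		unsigned int max = my_mergeable_bytes(q, bio->bi_sector,
 *						      bio->bi_size);
 *
 *		if (max < bvec->bv_len && bio->bi_size)
 *			return 0;
 *		return bvec->bv_len;
 *	}
 *
 *	blk_queue_merge_bvec(q, my_merge_bvec);
 */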

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}

EXPORT_SYMBOL(blk_queue_softirq_done);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;			/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	INIT_WORK(&q->unplug_work, blk_unplug_work);

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}

EXPORT_SYMBOL(blk_queue_make_request);
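
/*
 * Example (hypothetical virtual driver): an md/dm style driver that wants
 * bios handed to it directly, bypassing the request queue, could set things
 * up roughly as follows.  my_make_request() and my_lower_bdev are assumed
 * names, not interfaces defined elsewhere.
 *
 *	static int my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		bio->bi_bdev = my_lower_bdev;
 *		generic_make_request(bio);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, my_make_request);
 */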

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:  bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (bounce_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (bounce_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = bounce_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = bounce_pfn;
	}
}

EXPORT_SYMBOL(blk_queue_bounce_limit);
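
/*
 * Example (hypothetical): a PCI driver whose device can only address the
 * low 4GB of memory would typically pair its DMA mask with a matching
 * bounce limit:
 *
 *	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
 *		blk_queue_bounce_limit(q, DMA_32BIT_MASK);
 */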

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}

EXPORT_SYMBOL(blk_queue_max_sectors);
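
/*
 * Example (hypothetical): a controller that can transfer at most 64KB per
 * request would limit requests to 128 sectors of 512 bytes:
 *
 *	blk_queue_max_sectors(q, 128);
 */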

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_phys_segments = max_segments;
}

EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_hw_segments = max_segments;
}

EXPORT_SYMBOL(blk_queue_max_hw_segments);
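
/*
 * Example (hypothetical): an HBA whose scatter/gather table holds 32 entries,
 * and whose driver emits one SG entry per physical segment, would advertise
 * both limits at probe time:
 *
 *	blk_queue_max_phys_segments(q, 32);
 *	blk_queue_max_hw_segments(q, 32);
 */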

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
	}

	q->max_segment_size = max_size;
}

EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *    This should typically be set to the lowest possible sector size
 *    that the hardware can operate on (possibly without resorting to
 *    internal read-modify-write operations). Usually the default
 *    of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
	q->hardsect_size = size;
}

EXPORT_SYMBOL(blk_queue_hardsect_size);
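
/*
 * Example (hypothetical): a drive whose smallest addressable unit is a
 * native 4096-byte sector would report that size:
 *
 *	blk_queue_hardsect_size(q, 4096);
 */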

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
}

EXPORT_SYMBOL(blk_queue_stack_limits);
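
/*
 * Example (hypothetical stacking driver): an md/dm style driver assembling
 * a virtual device from several member devices would fold each member's
 * limits into its own queue.  top_queue, nr_members and member_bdev[] are
 * assumed names.
 *
 *	for (i = 0; i < nr_members; i++)
 *		blk_queue_stack_limits(top_queue, bdev_get_queue(member_bdev[i]));
 */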

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @buf:  physically contiguous buffer
 * @size:  size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q, void *buf,
			unsigned int size)
{
	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
		return -EINVAL;
	/* make room for appending the drain */
	--q->max_hw_segments;
	--q->max_phys_segments;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}

EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
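
/*
 * Example (hypothetical ATAPI host driver): allocate a modest drain area
 * once at initialization and register it, so the block layer appends it to
 * every scatterlist built for this queue.  MY_DRAIN_SIZE is an assumed
 * driver-chosen constant.
 *
 *	drain_buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);
 *	if (drain_buf)
 *		blk_queue_dma_drain(q, drain_buf, MY_DRAIN_SIZE);
 */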

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
	}

	q->seg_boundary_mask = mask;
}

EXPORT_SYMBOL(blk_queue_segment_boundary);
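
/*
 * Example (hypothetical): a DMA engine that cannot cross a 64KB address
 * boundary within a single scatter/gather element would pass the
 * corresponding mask:
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */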

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:  the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}

EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:  the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}

EXPORT_SYMBOL(blk_queue_update_dma_alignment);
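
/*
 * Example (hypothetical): a driver first sets a baseline 4-byte alignment
 * requirement (mask 3); a transport that later demands 512-byte aligned
 * buffers raises it, since only a stricter (larger) mask replaces the
 * current one:
 *
 *	blk_queue_dma_alignment(q, 3);
 *	blk_queue_update_dma_alignment(q, 511);
 */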

int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);