/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in user pages for
	 * direct DMA. Otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end_io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit.
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
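
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a REQ_TYPE_BLOCK_PC ioctl path might pair blk_rq_map_user() with
 * blk_rq_unmap_user().  The function name, the -ENOMEM fallback and the
 * omitted CDB/sense/timeout setup are hypothetical; real callers carry
 * more state than this.
 */
static int __maybe_unused blk_map_user_example(struct request_queue *q,
					       struct gendisk *disk,
					       void __user *ubuf,
					       unsigned long len, int write)
{
	struct request *rq;
	struct bio *bio;
	int ret, err;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* CDB, sense buffer and timeout setup omitted in this sketch */

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	/*
	 * Remember the bio produced by the mapping: I/O completion may
	 * change rq->bio, but blk_rq_unmap_user() needs the original.
	 */
	bio = rq->bio;
	ret = blk_execute_rq(q, disk, rq, 0);

	/* unmap in process context, even if the command itself failed */
	err = blk_rq_unmap_user(bio);
	if (err && !ret)
		ret = err;
out_put:
	blk_put_request(rq);
	return ret;
}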

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check the length of all segments.
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path.
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
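
/*
 * Illustrative sketch (editor's addition): building an sg_iovec vector for
 * blk_rq_map_user_iov().  The two user pointers and their lengths are
 * assumed to come from a hypothetical caller; execution and teardown then
 * follow the same blk_execute_rq()/blk_rq_unmap_user() pattern shown after
 * blk_rq_map_user() above.
 */
static int __maybe_unused blk_map_user_iov_example(struct request_queue *q,
						   struct request *rq,
						   void __user *hdr,
						   unsigned int hdr_len,
						   void __user *data,
						   unsigned int data_len)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = hdr;
	iov[0].iov_len  = hdr_len;
	iov[1].iov_base = data;
	iov[1].iov_len  = data_len;

	/* @len must equal the sum of the iovec lengths, or mapping fails */
	return blk_rq_map_user_iov(q, rq, NULL, iov, 2, hdr_len + data_len,
				   GFP_KERNEL);
}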

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
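
/*
 * Illustrative sketch (editor's addition): issuing a pass-through command
 * backed by a kernel buffer via blk_rq_map_kern().  The buffer is assumed
 * to be heap-allocated by the caller; a stack buffer would still work, but
 * the object_is_on_stack() check above forces it through bio_copy_kern().
 * Function name and the omitted CDB/sense/timeout setup are hypothetical.
 */
static int __maybe_unused blk_map_kern_example(struct request_queue *q,
					       struct gendisk *disk,
					       void *buf, unsigned int len,
					       int write)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		ret = blk_execute_rq(q, disk, rq, 0);

	/*
	 * No blk_rq_unmap_user() counterpart is needed here: the bio set up
	 * by blk_rq_map_kern() is released when the I/O completes.
	 */
	blk_put_request(rq);
	return ret;
}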