/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {				\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

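/*
 * For example, a submitter could stamp a bio with a best-effort priority
 * like this (a sketch; IOPRIO_PRIO_VALUE() and IOPRIO_CLASS_BE come from
 * linux/ioprio.h, and the level 4 is arbitrary):
 *
 *	if (!bio_prio_valid(bio))
 *		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 */
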
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define __bio_iovec(bio)	bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)

#define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])

#define bvec_iter_page(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_page)

#define bvec_iter_len(bvec, iter)				\
	min((iter).bi_size,					\
	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)

#define bvec_iter_offset(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)

#define bvec_iter_bvec(bvec, iter)				\
((struct bio_vec) {						\
	.bv_page	= bvec_iter_page((bvec), (iter)),	\
	.bv_len		= bvec_iter_len((bvec), (iter)),	\
	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
})

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

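/*
 * How the accessors above fit together: bio_iovec() synthesizes a
 * struct bio_vec for the bio's *current* position, folding in the
 * partially-completed byte count from bi_iter, so a driver can peek at
 * the next chunk of data without modifying the underlying vector.
 * A sketch (valid for lowmem pages only, as noted above):
 *
 *	struct bio_vec bv = bio_iovec(bio);
 *	void *buf = page_address(bv.bv_page) + bv.bv_offset;
 *
 *	pr_debug("next segment: %u bytes at %p\n", bv.bv_len, buf);
 */
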
#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

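/*
 * bi_size is in bytes while bi_sector is in 512-byte sectors, hence the
 * shift by 9 above. E.g. a bounds check against a device capacity (a
 * hypothetical 'capacity', in sectors) might read:
 *
 *	if (bio_end_sector(bio) > capacity)
 *		bio_io_error(bio);
 */
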
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    !(bio->bi_rw & REQ_DISCARD))
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

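/*
 * A sketch of a completion-side use of the helpers above: only touch the
 * payload when the bio actually carries mappable data ('src' here is a
 * hypothetical driver buffer):
 *
 *	if (bio_has_data(bio) && !PageHighMem(bio_page(bio)))
 *		memcpy(bio_data(bio), src, bio_cur_bytes(bio));
 */
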
/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, the user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, for eg virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0;							\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)

static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
				     unsigned bytes)
{
	WARN_ONCE(bytes > iter->bi_size,
		  "Attempted to advance past end of bvec iter\n");

	while (bytes) {
		unsigned len = min(bytes, bvec_iter_len(bv, *iter));

		bytes -= len;
		iter->bi_size -= len;
		iter->bi_bvec_done += len;

		if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
			iter->bi_bvec_done = 0;
			iter->bi_idx++;
		}
	}
}

#define for_each_bvec(bvl, bio_vec, iter, start)			\
	for ((iter) = start;						\
	     (bvl) = bvec_iter_bvec((bio_vec), (iter)),			\
		(iter).bi_size;						\
	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))

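/*
 * A sketch of walking a bare bvec array with for_each_bvec(); the iterator
 * is copied from 'start', so the caller's state is untouched ('bvec_array'
 * and 'nr_bytes' are hypothetical):
 *
 *	struct bvec_iter start = { .bi_size = nr_bytes };
 *	struct bvec_iter iter;
 *	struct bio_vec bv;
 *	unsigned total = 0;
 *
 *	for_each_bvec(bv, bvec_array, iter, start)
 *		total += bv.bv_len;
 */
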
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

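/*
 * Typical use: iterate over the unprocessed segments of a bio. Since the
 * iterator is a copy of bio->bi_iter, the bio itself is left unmodified.
 * A sketch that copies each segment into a hypothetical 'dst' buffer:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		char *src = kmap_atomic(bvec.bv_page);
 *
 *		memcpy(dst, src + bvec.bv_offset, bvec.bv_len);
 *		kunmap_atomic(src);
 *		dst += bvec.bv_len;
 *	}
 */
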
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would then already have been freed by the time the
 * if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	/* kill - should just use bip_vec */
	void			*bip_buf;	/* generated integrity data */

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned		bip_owns_buf:1;	/* should free bip_buf */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
extern void bio_trim(struct bio *bio, int offset, int size);

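/*
 * A sketch of the split pattern as used by remapping drivers such as md
 * ('first_sectors' is the size of the first fragment, in sectors;
 * generic_make_request() is declared in linux/blkdev.h):
 *
 *	struct bio_pair *bp = bio_split(bio, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */
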
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

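/*
 * A minimal sketch of allocating and synchronously submitting a one-page
 * read with the helpers above ('bdev', 'page' and 'sector' are assumed to
 * have been set up by the caller; bio_add_page() and submit_bio_wait()
 * are declared below):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	if (submit_bio_wait(READ, bio) < 0)
 *		pr_err("read failed\n");
 *	bio_put(bio);
 */
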
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

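/*
 * A sketch of the intended bvec_kmap_irq()/bvec_kunmap_irq() pairing; per
 * the warning above, interrupts must stay disabled in between ('data' is
 * a hypothetical source buffer):
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memcpy(buf, data, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */
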
static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter.bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

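/*
 * E.g. a remapping driver might park deferred bios on a list and later
 * drain them in FIFO order (a sketch; 'deferred' is a hypothetical
 * driver-private list, generic_make_request() comes from linux/blkdev.h):
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *
 *	bio_list_add(&deferred, bio);			// producer
 *
 *	while ((bio = bio_list_pop(&deferred)))		// consumer
 *		generic_make_request(bio);
 */
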
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

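/*
 * A sketch of a stacking driver setting up a private bio_set; the second
 * bioset_create() argument is the per-bio front padding, here sized for a
 * hypothetical per-bio context struct:
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE,
 *					   sizeof(struct my_ctx));
 *
 *	if (bs) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *		...
 *		bioset_free(bs);
 *	}
 */
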
/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

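/*
 * E.g. the submission path gates integrity generation roughly like this
 * (a sketch of the pattern used in blk-core):
 *
 *	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 *		bio_endio(bio, -EIO);
 *		return;
 *	}
 */
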
#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
	return;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */