/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);
/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};

#ifdef CONFIG_BLOCK
struct bvec_iter {
	sector_t		bi_sector;	/* device address in 512 byte
						   sectors */
	unsigned int		bi_size;	/* residual I/O count */

	unsigned int		bi_idx;		/* current index into bvl_vec */

	unsigned int		bi_bvec_done;	/* number of bytes completed in
						   current bvec */
};
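/*
 * Illustrative sketch, not part of this header: all mutable iteration
 * state lives in the bvec_iter, so advancing through an I/O only touches
 * the iterator, never the bio_vec array. Assuming a 4096-byte I/O
 * starting at sector 1000, completing 512 bytes would conceptually do:
 *
 *	iter.bi_sector    += 512 >> 9;	// 512-byte sectors: now 1001
 *	iter.bi_size      -= 512;	// residual count: now 3584
 *	iter.bi_bvec_done += 512;	// progress within the current bvec
 *
 * (once bi_bvec_done covers the current bvec's bv_len, bi_idx moves to
 * the next one). The in-tree helper implementing this is
 * bvec_iter_advance(); see bio.h.
 */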
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned int		bi_flags;	/* status, command, etc */
	unsigned int		bi_rw;		/* READ/WRITE */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;
	/*
	 * Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */
	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
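/*
 * Illustrative sketch, not part of this header: walking the data pages of
 * a bio. Iteration state lives in bi_iter, so the canonical loop goes
 * through the iterator rather than indexing bi_io_vec directly:
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter) {
 *		void *p = kmap_atomic(bvec.bv_page) + bvec.bv_offset;
 *		// ... touch bvec.bv_len bytes at p ...
 *		kunmap_atomic(p - bvec.bv_offset);
 *	}
 *
 * bio_for_each_segment() is declared in bio.h; raw bi_io_vec/bi_vcnt
 * indexing is reserved for code that owns the bio.
 */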
#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BIO_POOL_IDX()
 */
#define BIO_RESET_BITS	13
#define BIO_OWNS_VEC	13	/* bio_free() should free bvec */
/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET		(32 - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
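/*
 * Worked example (illustrative): with BIO_POOL_BITS = 4, BIO_POOL_OFFSET
 * is 32 - 4 = 28, so the pool index lives in bits 28..31 of bi_flags and
 * BIO_POOL_NONE is 15, the reserved "no pool" value. Storing and reading
 * an index therefore looks like:
 *
 *	bio->bi_flags |= idx << BIO_POOL_OFFSET;	// stash pool index
 *	idx = BIO_POOL_IDX(bio);			// == bi_flags >> 28
 */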
#endif /* CONFIG_BLOCK */
/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_rw of struct bio.  Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_WRITE,		/* not set, read. set, write */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_DISCARD,		/* request to discard sectors */
	__REQ_SECURE,		/* secure discard (used with __REQ_DISCARD) */
	__REQ_WRITE_SAME,	/* write same block many times */

	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_FLUSH,		/* request for cache flush */
	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NR_BITS,		/* stops here */
};
#define REQ_WRITE		(1ULL << __REQ_WRITE)
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_DISCARD		(1ULL << __REQ_DISCARD)
#define REQ_WRITE_SAME		(1ULL << __REQ_WRITE_SAME)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
	 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
	 REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK
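/*
 * Illustrative sketch, not part of this header: the grouped masks let
 * callers test a policy with one AND instead of three flag checks, e.g.:
 *
 *	if (bio->bi_rw & REQ_FAILFAST_MASK)
 *		;	// any of the three failfast variants is set
 *
 * REQ_COMMON_MASK selects the flags meaningful to both requests and bios,
 * which is why REQ_CLONE_MASK reuses it when flags are copied to a clone.
 */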
#define BIO_NO_ADVANCE_ITER_MASK	(REQ_DISCARD|REQ_WRITE_SAME)
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
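/*
 * Illustrative sketch, not part of this header (field names assumed from
 * the comment above): a merge attempt can be rejected up front by testing
 * the combined flags of both candidates:
 *
 *	if ((req->cmd_flags | bio->bi_rw) & REQ_NOMERGE_FLAGS)
 *		return false;	// at least one side must not be merged
 */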
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_FLUSH		(1ULL << __REQ_FLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_SECURE		(1ULL << __REQ_SECURE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE		= REQ_WRITE,
	REQ_OP_DISCARD		= REQ_DISCARD,
	REQ_OP_WRITE_SAME	= REQ_WRITE_SAME,
};
/*
 * Temporary compat: users used to set the write bit for all non-reads, but
 * we will be dropping the bitmap use for ops. Support both until the end
 * of the patchset.
 */
static inline int op_from_rq_bits(u64 flags)
{
	if (flags & REQ_OP_DISCARD)
		return REQ_OP_DISCARD;
	else if (flags & REQ_OP_WRITE_SAME)
		return REQ_OP_WRITE_SAME;
	else if (flags & REQ_OP_WRITE)
		return REQ_OP_WRITE;
	else
		return REQ_OP_READ;
}
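/*
 * Illustrative usage (the test order matters): during the transition,
 * discard and write-same carry the write bit as well, so they must be
 * tested before plain REQ_OP_WRITE:
 *
 *	int op = op_from_rq_bits(bio->bi_rw);
 *	// REQ_WRITE | REQ_DISCARD  -> REQ_OP_DISCARD, not REQ_OP_WRITE
 *	// no bits set              -> REQ_OP_READ
 */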
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
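/*
 * Illustrative sketch, not part of this header: a queue cookie packs the
 * hardware queue number above the tag, so both survive a round trip:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	// (3 << 16) | 42
 *
 *	blk_qc_t_valid(cookie);			// true (!= BLK_QC_T_NONE)
 *	blk_qc_t_to_queue_num(cookie);		// 3
 *	blk_qc_t_to_tag(cookie);		// 42
 *
 * This assumes tags stay below 1 << BLK_QC_T_SHIFT; larger tags would
 * bleed into the queue-number bits.
 */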
#endif /* __LINUX_BLK_TYPES_H */