/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
typedef void (bio_destructor_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_rw;		/* bottom bits req flags,
						 * top bits REQ_OP
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
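
/*
 * Example (illustrative only, not part of this header): a minimal sketch
 * of how bi_end_io/bi_private are typically wired up by a submitter. The
 * my_ctx type and my_end_io() name are hypothetical; complete() and
 * bio_put() come from linux/completion.h and linux/bio.h respectively.
 *
 *	struct my_ctx {
 *		struct completion done;
 *		int error;
 *	};
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		ctx->error = bio->bi_error;	// 0 on success
 *		complete(&ctx->done);
 *		bio_put(bio);			// drop our __bi_cnt reference
 *	}
 */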

#define BIO_OP_SHIFT	(8 * sizeof(unsigned int) - REQ_OP_BITS)
#define bio_op(bio)	((bio)->bi_rw >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {		\
	WARN_ON(op >= (1 << REQ_OP_BITS));			\
	(bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1);		\
	(bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT);	\
	(bio)->bi_rw |= op_flags;				\
} while (0)
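
/*
 * Worked example (illustrative): with a 32-bit unsigned int and
 * REQ_OP_BITS of 3, BIO_OP_SHIFT is 29, so the op occupies bits 31..29
 * of bi_rw and the request flags sit in the bits below. Packing a sync
 * write:
 *
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
 *	// bi_rw == (1u << 29) | REQ_SYNC
 *	// bio_op(bio) == REQ_OP_WRITE
 */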

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
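
/*
 * Illustrative sketch of how bio_reset() can use BIO_RESET_BYTES (the
 * real implementation lives in block/bio.c): zero everything in front
 * of bi_max_vecs in one go while keeping the flag bits at and above
 * BIO_RESET_BITS, roughly:
 *
 *	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 *	bio->bi_flags = flags;
 */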

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
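
/*
 * Note these values are bit numbers, not masks. A flag test is therefore
 * a shift-and-mask on bi_flags; linux/bio.h wraps this pattern as
 * bio_flagged()/bio_set_flag(). Illustrative open-coded equivalent:
 *
 *	if (bio->bi_flags & (1U << BIO_CLONED))
 *		;	// this bio does not own its data pages
 */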

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
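
/*
 * Illustrative encode/decode sketch (the real users are bvec_alloc()
 * and friends in block/bio.c): a pool index idx is stored biased by 1,
 * so a zero field means there are no bvecs to free:
 *
 *	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
 *	...
 *	if (BVEC_POOL_IDX(bio))		// non-zero: bvecs came from a pool
 *		;	// free bi_io_vec back to pool BVEC_POOL_IDX(bio) - 1
 */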

#endif /* CONFIG_BLOCK */

/*
 * Request flags. For use in the cmd_flags field of struct request, and in
 * bi_rw of struct bio. Note that some flags are only valid in either one.
 */
enum rq_flag_bits {
	/* common flags */
	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */

	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */

	/* bio only flags */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

	/* request only flags */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_COPY_USER,	/* contains copies of user pages */
	__REQ_FLUSH_SEQ,	/* request for flush sequence */
	__REQ_IO_STAT,		/* account I/O stat */
	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
	__REQ_PM,		/* runtime pm request */
	__REQ_HASHED,		/* on IO scheduler merge hash */
	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | \
	 REQ_FUA | REQ_FLUSH_SEQ)
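
/*
 * Illustrative use of the mask above (a sketch; the real merge checks
 * live in block/blk-merge.c): a pair of requests is ineligible for
 * merging if either carries any REQ_NOMERGE_FLAGS bit:
 *
 *	if ((req->cmd_flags | next->cmd_flags) & REQ_NOMERGE_FLAGS)
 *		return false;	// don't attempt the merge
 */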

#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)

enum req_op {
	REQ_OP_READ,
	REQ_OP_WRITE,
	REQ_OP_DISCARD,		/* request to discard sectors */
	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
	REQ_OP_WRITE_SAME,	/* write same block many times */
	REQ_OP_FLUSH,		/* request for cache flush */
};

#define REQ_OP_BITS 3
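
/*
 * Sanity note: the largest op, REQ_OP_FLUSH, has value 5, which fits in
 * the 3 op bits (5 < (1 << 3) == 8); the WARN_ON() in bio_set_op_attrs()
 * enforces this bound at runtime.
 */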

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
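
/*
 * Round-trip example (illustrative): a cookie packs the hardware queue
 * number above BLK_QC_T_SHIFT and the tag below it, so for tag 42 on
 * queue 3:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	// 0x3002a
 *	blk_qc_t_to_queue_num(cookie);			// 3
 *	blk_qc_t_to_tag(cookie);			// 42
 *
 * BLK_QC_T_NONE (~0U) is reserved as the "no cookie" value checked by
 * blk_qc_t_valid().
 */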

#endif /* __LINUX_BLK_TYPES_H */