/*
 * Block data types and constants.  Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

/*
 * Block error status values.  See block/blk-core:blk_errors for the details.
 */
typedef u8 __bitwise blk_status_t;
#define BLK_STS_OK		0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)
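/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how callers typically test a blk_status_t.  Because the
 * type is __bitwise, it is compared against the BLK_STS_* values rather
 * than raw integers; the retry policy below is only an example.
 */
static inline bool blk_status_is_retryable_example(blk_status_t status)
{
	/* example policy: resource pressure and timeouts may be retried */
	return status == BLK_STS_RESOURCE || status == BLK_STS_TIMEOUT;
}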
struct blk_issue_stat {
	u64 stat;
};

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	blk_status_t		bi_status;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio.  Put on bio
	 * release.  Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
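/*
 * Illustrative sketch, not part of the original header: bio_reset() in
 * block/bio.c conceptually clears only the leading BIO_RESET_BYTES of a
 * bio, so everything from bi_max_vecs onward survives a reset (the real
 * implementation also preserves the flag bits at or above BIO_RESET_BITS,
 * defined below).  Hypothetical helper name; assumes <linux/string.h>
 * for memset().
 */
static inline void bio_reset_layout_example(struct bio *bio)
{
	memset(bio, 0, BIO_RESET_BYTES);	/* bi_next .. bi_vcnt are zeroed */
	/* bi_max_vecs, __bi_cnt, bi_io_vec and bi_pool keep their values */
}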
/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED	4	/* contains user pages */
#define BIO_NULL_MAPPED	5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final
				 * completion of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */
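/*
 * Illustrative sketch, not part of the original header: the BIO_* values
 * above are bit numbers rather than masks, so tests shift them first
 * (linux/bio.h wraps this pattern in bio_flagged()/bio_set_flag()).
 * Hypothetical helper name.
 */
static inline bool bio_is_cloned_example(const struct bio *bio)
{
	return (bio->bi_flags & (1U << BIO_CLONED)) != 0;
}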
/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from.  We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET
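/*
 * Illustrative sketch, not part of the original header: how a bvec pool
 * index is packed into and recovered from bi_flags with the macros above.
 * The helper name is hypothetical; idx is the "actual" pool index, and
 * idx + 1 is stored so that 0 means there are no bvecs to free.
 */
static inline void bvec_pool_pack_example(struct bio *bio, unsigned int idx)
{
	bio->bi_flags &= (1 << BVEC_POOL_OFFSET) - 1;	/* keep the flag bits */
	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
	/* BVEC_POOL_IDX(bio) now evaluates to idx + 1 */
}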
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
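/*
 * Illustrative sketch, not part of the original header: composing bi_opf
 * from an operation and flags, then querying it with the helpers above.
 * The helper name and the chosen flag combination are only examples.
 */
static inline bool opf_helpers_example(void)
{
	/* a synchronous FUA write: op in the low 8 bits, flags above them */
	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

	/* the op is recovered by masking, the set low bit marks a write,
	 * and REQ_FUA makes both op_is_flush() and op_is_sync() true */
	return (opf & REQ_OP_MASK) == REQ_OP_WRITE &&
	       op_is_write(opf) && op_is_flush(opf) && op_is_sync(opf);
}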
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
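/*
 * Illustrative sketch, not part of the original header: a completion
 * cookie carries the tag in the low 16 bits, the hardware queue number
 * above them, and bit 31 marking an internal (scheduler) tag.  The
 * helper name and the values used are only examples.
 */
static inline bool blk_qc_t_roundtrip_example(void)
{
	blk_qc_t cookie = blk_tag_to_qc_t(42, 3, true);

	return blk_qc_t_valid(cookie) &&
	       blk_qc_t_to_tag(cookie) == 42 &&
	       blk_qc_t_to_queue_num(cookie) == 3 &&
	       blk_qc_t_is_internal(cookie);
}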
struct blk_rq_stat {
	s64 mean;
	u64 min;
	u64 max;
	s32 nr_samples;
	s32 nr_batch;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */