/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

struct blk_issue_stat {
	u64 stat;
};

/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits req flags,
						 * top bits REQ_OP. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, etc. and bvec pool number */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	void			*bi_cg_private;
	struct blk_issue_stat	bi_issue_stat;
#endif
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
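
/*
 * Example (sketch, with hypothetical handler, context, and helper names):
 * a completion callback matching bio_end_io_t typically recovers its
 * context from bi_private and inspects bi_error before dropping its
 * reference with bio_put():
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		if (bio->bi_error)
 *			my_handle_failure(ctx, bio->bi_error);
 *		bio_put(bio);
 *	}
 */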

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
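
/*
 * Illustrative sketch: bio_reset() (implemented in block/bio.c) zeroes
 * everything up to BIO_RESET_BYTES and then restores the preserved flag
 * bits (see BIO_RESET_BITS below), roughly:
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */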

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10	/* bio_endio() should trace the final completion
				 * of this bio. */
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif
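
/*
 * Illustrative sketch: when a bio's bvecs come from pool idx (1-based,
 * with 0 meaning there is nothing to free), the allocator in block/bio.c
 * records it in the top BVEC_POOL_BITS of bi_flags, roughly:
 *
 *	bio->bi_flags |= idx << BVEC_POOL_OFFSET;
 *
 * BVEC_POOL_IDX(bio) then recovers idx when the bio is freed.
 */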

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
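
/*
 * Example (sketch): decomposing a bi_opf value. The low REQ_OP_BITS hold
 * the operation, whose least significant bit gives the direction (see
 * op_is_write() below); the remaining upper bits hold REQ_* flags:
 *
 *	unsigned int op = bio->bi_opf & REQ_OP_MASK;
 *	bool to_device = op & 1;
 *	unsigned int flags = bio->bi_opf & ~REQ_OP_MASK;
 */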

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero-filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
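
/*
 * Example (sketch): composing bi_opf for a synchronous FUA write. The
 * flag bits sit above the op bits, so OR-ing them together is safe:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 */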

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
		unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
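
/*
 * In new code the preferred style is a direct assignment of bi_opf, with
 * the op and any flags OR-ed together, e.g.:
 *
 *	bio->bi_opf = REQ_OP_READ | REQ_RAHEAD;
 */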

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
				       bool internal)
{
	blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);

	if (internal)
		ret |= BLK_QC_T_INTERNAL;

	return ret;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
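
/*
 * Example (sketch): a cookie packs the tag into the low BLK_QC_T_SHIFT
 * bits, the hardware queue number above it, and uses bit 31 for internal
 * tags, so the helpers above round-trip:
 *
 *	blk_qc_t c = blk_tag_to_qc_t(5, 2, false);
 *
 *	blk_qc_t_to_tag(c)		returns 5
 *	blk_qc_t_to_queue_num(c)	returns 2
 *	blk_qc_t_is_internal(c)		returns false
 */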

struct blk_rq_stat {
	s64 mean;
	u64 min;
	u64 max;
	s32 nr_samples;
	s32 nr_batch;
	u64 batch;
};

#endif /* __LINUX_BLK_TYPES_H */