/*
 * Block data types and constants. Directly include this file only to
 * break an include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (i.e. drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req flags. Use
						 * accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by
	 * bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
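
/*
 * Illustrative sketch, not code from this header: because bi_inline_vecs
 * is a zero-length trailing array, a small bio and its vector table can
 * come from a single allocation (nr_vecs and gfp below are hypothetical):
 *
 *	bio = kmalloc(sizeof(struct bio) +
 *		      nr_vecs * sizeof(struct bio_vec), gfp);
 *	bio->bi_io_vec = bio->bi_inline_vecs;
 *	bio->bi_max_vecs = nr_vecs;
 */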

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
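
/*
 * Sketch of the intended use (the real reset logic lives in block/bio.c;
 * this is only an assumption about how the constant is consumed):
 * everything in front of bi_max_vecs can be cleared in one call while the
 * preserved tail is left intact:
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 */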

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
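
/*
 * Illustrative sketch (hypothetical snippet, not a helper from this
 * header): recording that a bio's bvecs came from pool "idx" stores
 * idx + 1 in the top bits of bi_flags:
 *
 *	bio->bi_flags |= (idx + 1) << BVEC_POOL_OFFSET;
 *
 * so a later BVEC_POOL_IDX(bio) of zero means there are no bvecs to free.
 */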

#endif /* CONFIG_BLOCK */

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,

	REQ_OP_LAST,
};
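
/*
 * Illustrative sketch (not part of this header): the direction rule above
 * can be checked directly against the enum values, e.g.
 *
 *	(REQ_OP_READ  & 1) == 0	- transfer FROM the device
 *	(REQ_OP_WRITE & 1) == 1	- transfer TO the device
 *	(REQ_OP_FLUSH & 1) == 0	- no data transfer, bit has no meaning
 *
 * which is exactly what op_is_write() below relies on.
 */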

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
#define bio_set_op_attrs(bio, op, op_flags) \
	((bio)->bi_opf |= (op | op_flags))
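
/*
 * Usage sketch (illustrative only): since the op lives in the low
 * REQ_OP_BITS of bi_opf and the flags sit above it, new code composes the
 * field with a plain assignment instead of the obsolete helper:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 * and recovers the operation with bio_op(bio), which masks the flag bits
 * back off.
 */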

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
}
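
/*
 * Illustrative expectations (assumed call results, not asserted anywhere
 * in this header): reads are always treated as synchronous, writes only
 * when explicitly flagged:
 *
 *	op_is_sync(REQ_OP_READ)			== true
 *	op_is_sync(REQ_OP_WRITE)		== false
 *	op_is_sync(REQ_OP_WRITE | REQ_SYNC)	== true
 */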

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
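
/*
 * Round-trip sketch (illustrative only): a cookie packs the queue number
 * into the high 16 bits and the tag into the low 16 bits, so for any tag
 * and queue_num that fit in 16 bits:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(tag, queue_num);
 *
 *	blk_qc_t_to_tag(cookie)       == tag
 *	blk_qc_t_to_queue_num(cookie) == queue_num
 */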

#endif /* __LINUX_BLK_TYPES_H */