/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};
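
/*
 * Illustrative note (an editorial sketch, not from the original source):
 * flush_queue[2] double-buffers pending flush requests; flush_pending_idx
 * selects the list that newly arriving flushes are queued on, while
 * flush_running_idx names the list whose flush is currently in flight.  The
 * two single-bit indices simply toggle between 0 and 1 as flush batches are
 * issued and completed.
 */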

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	if (q->mq_ops)
		return blk_mq_map_queue(q, ctx->cpu)->fq;
	return q->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}
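
/*
 * Illustrative usage (a sketch, not part of the original header): code that
 * is already running under generic_make_request(), and therefore already
 * holds a q_usage_counter reference, may take an extra short-lived reference
 * without the freeze check:
 *
 *	blk_queue_enter_live(q);
 *	... touch queue state ...
 *	blk_queue_exit(q);
 */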

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}
#else
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int *request_count,
		struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	/*
	 * Keep these two bits first - not because we depend on the
	 * value of them, but we do depend on them being in the same
	 * byte of storage to ensure ordering on writes. Keeping them
	 * first will achieve that nicely.
	 */
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,

	REQ_ATOM_POLL_SLEPT,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
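
/*
 * Illustrative pattern (a sketch, not from the original source; the callee
 * name below is hypothetical): the normal completion path and the error
 * handling timer both try to claim the request first, so only one of them
 * ends up completing it:
 *
 *	if (!blk_mark_rq_complete(rq))
 *		finish_request(rq);
 *
 * The loser of the race sees REQ_ATOM_COMPLETE already set and backs off.
 */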

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_activate_req_fn)
		e->type->ops.sq.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.sq.elevator_deactivate_req_fn)
		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
		const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
		struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
	       !blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'.  At the same time it has to be aligned
 * to the logical block size, which is the minimum unit accepted by the
 * hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
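
/*
 * Worked example (illustrative, not from the original source): with 512-byte
 * logical blocks, round_down(UINT_MAX, 512) = 4294966784, so the limit is
 * 4294966784 >> 9 = 8388607 sectors; with 4096-byte logical blocks it is
 * round_down(UINT_MAX, 4096) >> 9 = 8388600 sectors, i.e. slightly less than
 * the full 32-bit byte range once alignment is taken into account.
 */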

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
		gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio.  May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio && bio->bi_ioc)
		return bio->bi_ioc;
#endif
	return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
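
/*
 * Illustrative call pattern (a sketch, not part of the original header):
 * request allocation paths typically make sure an io_context exists before
 * resolving the one to charge for the bio, and both helpers tolerate
 * allocation failure:
 *
 *	create_io_context(gfp_mask, q->node);
 *	ioc = rq_ioc(bio);
 *	if (!ioc)
 *		... proceed without an io_context ...
 */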

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
		const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

extern void blk_drain_queue(struct request_queue *q);

#endif /* BLK_INTERNAL_H */