/* block/blk.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT         (5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
        unsigned int            flush_queue_delayed:1;
        unsigned int            flush_pending_idx:1;
        unsigned int            flush_running_idx:1;
        unsigned long           flush_pending_since;
        struct list_head        flush_queue[2];
        struct list_head        flush_data_in_flight;
        struct request          *flush_rq;

        /*
         * flush_rq shares tag with this rq, both can't be active
         * at the same time
         */
        struct request          *orig_rq;
        spinlock_t              mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

/*
 * @q->queue_lock is set while a queue is being initialized. Since we know
 * that no other threads access the queue object before @q->queue_lock has
 * been set, it is safe to manipulate queue flags without holding the
 * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
 * blk_init_allocated_queue().
 */
static inline void queue_lockdep_assert_held(struct request_queue *q)
{
        if (q->queue_lock)
                lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
                                           struct request_queue *q)
{
        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
            kref_read(&q->kobj.kref))
                lockdep_assert_held(q->queue_lock);
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
                                             struct request_queue *q)
{
        if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
            kref_read(&q->kobj.kref))
                lockdep_assert_held(q->queue_lock);
        __clear_bit(flag, &q->queue_flags);
}
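
/*
 * Illustrative sketch, not part of this header: how the unlocked and locked
 * flag helpers are intended to be used.  The my_driver_* functions are
 * hypothetical; the helpers and QUEUE_FLAG_NOMERGES come from this file and
 * <linux/blkdev.h>.
 */
static inline void my_driver_early_setup(struct request_queue *q)
{
        /*
         * Before QUEUE_FLAG_INIT_DONE is set and the queue kobject is
         * published, no other thread can reach @q, so the unlocked variant
         * is safe and will not trip lockdep.
         */
        queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, q);
}

static inline void my_driver_runtime_toggle(struct request_queue *q)
{
        /* After initialization, flag updates must hold the queue_lock. */
        spin_lock_irq(q->queue_lock);
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        spin_unlock_irq(q->queue_lock);
}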

static inline int queue_flag_test_and_clear(unsigned int flag,
                                            struct request_queue *q)
{
        queue_lockdep_assert_held(q);

        if (test_bit(flag, &q->queue_flags)) {
                __clear_bit(flag, &q->queue_flags);
                return 1;
        }

        return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
                                          struct request_queue *q)
{
        queue_lockdep_assert_held(q);

        if (!test_bit(flag, &q->queue_flags)) {
                __set_bit(flag, &q->queue_flags);
                return 0;
        }

        return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        queue_lockdep_assert_held(q);
        __clear_bit(flag, &q->queue_flags);
}

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
        return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
        kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                struct bio *bio);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
        /*
         * Given that running in generic_make_request() context
         * guarantees that a live reference against q_usage_counter has
         * been established, further references under that same context
         * need not check that the queue has been frozen (marked dead).
         */
        percpu_ref_get(&q->q_usage_counter);
}
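
/*
 * Illustrative sketch, not part of this header: contrast between taking a
 * queue reference from an arbitrary context and from generic_make_request()
 * context.  The my_* functions are hypothetical; blk_queue_enter(),
 * blk_queue_exit() and blk_queue_enter_live() are existing interfaces.
 */
static inline void my_issue_from_arbitrary_context(struct request_queue *q)
{
        /* The queue may be frozen or dying, so this reference can fail. */
        if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
                return;
        /* ... submit I/O ... */
        blk_queue_exit(q);
}

static inline void my_split_inside_make_request(struct request_queue *q)
{
        /*
         * A reference is already held for the bio being processed, so an
         * extra one can be taken unconditionally, e.g. for a cloned bio.
         */
        blk_queue_enter_live(q);
        /* ... do work that needs the queue to stay alive ... */
        blk_queue_exit(q);
}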

static inline bool biovec_phys_mergeable(struct request_queue *q,
                struct bio_vec *vec1, struct bio_vec *vec2)
{
        unsigned long mask = queue_segment_boundary(q);
        phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
        phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

        if (addr1 + vec1->bv_len != addr2)
                return false;
        if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
                return false;
        if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
                return false;
        return true;
}
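
/*
 * Standalone demonstration of the arithmetic above, illustrative only and not
 * part of this header.  It repeats the contiguity and segment-boundary checks
 * with plain integers instead of struct bio_vec; demo_phys_mergeable() is a
 * hypothetical name.
 */
static int demo_phys_mergeable(unsigned long long addr1, unsigned int len1,
                               unsigned long long addr2, unsigned int len2,
                               unsigned long long boundary_mask)
{
        /* The two segments must be physically back to back ... */
        if (addr1 + len1 != addr2)
                return 0;
        /* ... and the merged segment must not straddle the boundary. */
        if ((addr1 | boundary_mask) != ((addr2 + len2 - 1) | boundary_mask))
                return 0;
        return 1;
}

/*
 * With a 64K segment boundary (mask 0xffff):
 *   demo_phys_mergeable(0x10000, 0x1000, 0x11000, 0x1000, 0xffff) -> 1
 *     (both segments sit inside the same 64K window)
 *   demo_phys_mergeable(0x1f000, 0x1000, 0x20000, 0x1000, 0xffff) -> 0
 *     (contiguous, but the merge would cross the 0x20000 boundary)
 */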

static inline bool __bvec_gap_to_prev(struct request_queue *q,
                struct bio_vec *bprv, unsigned int offset)
{
        return offset ||
                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
                struct bio_vec *bprv, unsigned int offset)
{
        if (!queue_virt_boundary(q))
                return false;
        return __bvec_gap_to_prev(q, bprv, offset);
}
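
/*
 * Standalone demonstration of the gap rule above, illustrative only and not
 * part of this header: with a virt boundary in effect, the next vector must
 * start at offset 0 and the previous vector must end exactly on the boundary,
 * otherwise the hardware would see a gap between the two SG elements.
 * demo_gap_to_prev() is a hypothetical name.
 */
static int demo_gap_to_prev(unsigned int prev_offset, unsigned int prev_len,
                            unsigned int next_offset,
                            unsigned long virt_boundary_mask)
{
        if (!virt_boundary_mask)
                return 0;       /* no constraint, never a gap */
        return next_offset ||
                ((prev_offset + prev_len) & virt_boundary_mask);
}

/*
 * With a 4K virt boundary (mask 0xfff):
 *   demo_gap_to_prev(0, 4096, 0, 0xfff)   -> 0         (prev ends on the
 *                                                        boundary, next starts
 *                                                        at offset 0)
 *   demo_gap_to_prev(0, 2048, 0, 0xfff)   -> non-zero  (prev ends mid-page)
 *   demo_gap_to_prev(0, 4096, 512, 0xfff) -> non-zero  (next does not start
 *                                                        at offset 0)
 */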

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
        if (bio_integrity(bio))
                return __bio_integrity_endio(bio);
        return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
                struct bio *next)
{
        struct bio_integrity_payload *bip = bio_integrity(req->bio);
        struct bio_integrity_payload *bip_next = bio_integrity(next);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
                struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

        return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
                                bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
                struct bio *next)
{
        return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
                struct bio *bio)
{
        return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
        return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                             struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                            struct bio *bio);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
                               struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds. Steal the bottom bit of the
 * __deadline field for this.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
        return test_and_set_bit(0, &rq->__deadline);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
        clear_bit(0, &rq->__deadline);
}

static inline bool blk_rq_is_complete(struct request *rq)
{
        return test_bit(0, &rq->__deadline);
}
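
/*
 * Standalone demonstration of the "only one side wins" rule above,
 * illustrative only and not part of this header.  It uses the GCC/Clang
 * __atomic builtins in place of the kernel's test_and_set_bit(); the
 * semantics are the same: the first caller to set bit 0 sees the old value 0
 * and owns completion, every later caller sees 1 and must back off.
 * demo_mark_complete() is a hypothetical name.
 */
static int demo_mark_complete(unsigned long *deadline_word)
{
        /* Returns the previous value of bit 0: 0 means "we grabbed it". */
        return (int)(__atomic_fetch_or(deadline_word, 1UL,
                                       __ATOMIC_SEQ_CST) & 1UL);
}

/*
 * If the timeout handler and the normal completion path both call
 * demo_mark_complete() on the same word, exactly one of them observes 0 and
 * completes the request; the other observes 1 and does nothing.
 */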

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
                       struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
                           const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
        return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
        return rq->rq_disk &&
               (rq->rq_flags & RQF_IO_STAT) &&
               !blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
}

/*
 * Steal a bit from this field for legacy IO path atomic IO marking. Note that
 * setting the deadline clears the bottom bit, potentially clearing the
 * completed bit. The user has to be OK with this (current ones are fine).
 */
static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
{
        rq->__deadline = time & ~0x1UL;
}

static inline unsigned long blk_rq_deadline(struct request *rq)
{
        return rq->__deadline & ~0x1UL;
}
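
/*
 * Standalone demonstration of the bit sharing above, illustrative only and
 * not part of this header: bit 0 of the word doubles as the completion
 * marker, so writing a deadline always stores an even value (clearing the
 * marker) and reading it masks the marker back out.  The demo_* names are
 * hypothetical.
 */
static unsigned long demo_deadline_word;

static void demo_set_deadline(unsigned long time)
{
        /* Mirrors blk_rq_set_deadline(): bit 0 is always written as 0. */
        demo_deadline_word = time & ~0x1UL;
}

static unsigned long demo_get_deadline(void)
{
        /* Mirrors blk_rq_deadline(): hide the marker bit from callers. */
        return demo_deadline_word & ~0x1UL;
}

/*
 * If the word was previously marked complete (bit 0 set), demo_set_deadline()
 * clears that marker as a side effect, which is exactly the caveat spelled
 * out in the comment above.
 */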

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * rq_ioc - determine io_context for request allocation
 * @bio: request being allocated is for this bio (can be %NULL)
 *
 * Determine io_context to use for request allocation for @bio. May return
 * %NULL if %current->io_context doesn't exist.
 */
static inline struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
        if (bio && bio->bi_ioc)
                return bio->bi_ioc;
#endif
        return current->io_context;
}

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it. Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
        WARN_ON_ONCE(irqs_disabled());
        if (unlikely(!current->io_context))
                create_task_io_context(current, gfp_mask, node);
        return current->io_context;
}
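
/*
 * Illustrative sketch, not part of this header: the intended pattern in a
 * legacy request-allocation path.  my_prepare_request_alloc() is
 * hypothetical; it only uses the helpers declared above.
 */
static inline struct io_context *my_prepare_request_alloc(struct bio *bio,
                                                          int node)
{
        /*
         * Process context with IRQs enabled: if %current has no io_context
         * yet, try to create one.  Allocation failure leaves it NULL, which
         * callers must tolerate.
         */
        create_io_context(GFP_NOIO, node);

        /* Prefer an io_context already attached to the bio, if any. */
        return rq_ioc(bio);
}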

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
        const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
        return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */