/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

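/*
 * Per-hardware-queue flush state. flush_queue[] is double-buffered:
 * requests collect on flush_queue[flush_pending_idx] while flush_rq
 * services the batch at flush_queue[flush_running_idx], and the two
 * index bits flip as batches complete (see blk-flush.c).
 */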
struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

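/*
 * Takes a bare kobject reference: unlike blk_get_queue(), there is no
 * check that the queue is still alive, so callers must already hold a
 * reference or otherwise guarantee the queue cannot go away.
 */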
static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
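
/*
 * Illustrative example (not in the original source): with a 64K segment
 * boundary, mask is 0xffff; bvecs at addr1 = 0x1f000/len 0x1000 and
 * addr2 = 0x20000 are physically contiguous, but (0x1f000 | 0xffff) !=
 * ((0x20000 + len2 - 1) | 0xffff), so the merge is rejected because the
 * combined segment would straddle the 64K window at 0x20000.
 */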

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
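
/*
 * Background note (added commentary): a non-zero virt_boundary means the
 * device requires each middle segment of a request to start and end on
 * that boundary; NVMe, for example, sets a virt_boundary of its controller
 * page size minus one to satisfy its PRP list layout.
 */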

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}
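
/*
 * Note (added commentary): RQF_IO_STAT is sampled when the request is
 * started, so toggling the queue's "iostats" sysfs attribute does not
 * change the accounting of requests already in flight.
 */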

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The maximum size one bio can handle is UINT_MAX, because bvec_iter.bi_size
 * is defined as 'unsigned int'. It also has to be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
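
/*
 * Illustrative example: with 512-byte logical blocks this is
 * round_down(0xffffffff, 512) >> 9 = 0xfffffe00 >> 9 = 8388607 sectors,
 * i.e. just under 4 GiB per bio.
 */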

/*
 * The maximum bio size that is aligned to q->limits.discard_granularity.
 * This is a hint for splitting large discard bios in the generic block
 * layer; if the device driver then needs to split the discard bio into
 * smaller ones, their bi_size will most likely still be aligned to the
 * discard_granularity of the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
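
/*
 * Illustrative example: with a 4096-byte discard_granularity this is
 * round_down(0xffffffff, 4096) >> 9 = 8388600 sectors, a whole multiple
 * of the granularity (8 sectors), so aligned splits stay aligned.
 */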

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_hipri(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_HIPRI;
}

extern const struct address_space_operations def_blk_aops;

#endif /* BLK_INTERNAL_H */