block/blk.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

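/*
 * Descriptive note (added): return the flush queue of the hardware queue
 * that @ctx maps to for REQ_OP_FLUSH.
 */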
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
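/*
 * Illustrative example (added, not part of the original header): with a 64K
 * segment boundary, mask == 0xffff.  Two 4K vectors at physical addresses
 * 0x1f000 and 0x20000 are contiguous (0x1f000 + 0x1000 == 0x20000), but
 * (0x1f000 | 0xffff) == 0x1ffff while ((0x20000 + 0xfff) | 0xffff) ==
 * 0x2ffff, so the pair straddles the boundary and is not mergeable.
 */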

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
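/*
 * Illustrative note (added): a non-zero virt_boundary is typically set by
 * drivers such as NVMe, where every SG element after the first must start
 * on the boundary and the previous one must end on it; a bio_vec violating
 * either rule is reported as a gap and forces a split.
 */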

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

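/*
 * Descriptive note (added): gap checks for the integrity payload when
 * merging.  A back merge checks the existing payload's last bip_vec against
 * the incoming bio's first; a front merge checks the incoming bio's last
 * bip_vec against the existing payload's first.
 */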
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT 32
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
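/*
 * Note (added, approximate): these limits bound how much work a blk_plug may
 * accumulate before it is flushed; roughly, submission flushes the plug early
 * once it holds BLK_MAX_REQUEST_COUNT requests or the last queued request has
 * reached BLK_PLUG_FLUSH_SIZE bytes (see blk_mq_submit_bio()).
 */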

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT);
}

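/*
 * Descriptive note (added): mark @req as not mergeable and drop it as the
 * queue's cached last-merge candidate.
 */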
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The maximum size one bio can handle is UINT_MAX, because bvec_iter.bi_size
 * is defined as 'unsigned int'.  It also has to be aligned to the logical
 * block size, which is the minimum unit accepted by the hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
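/*
 * Worked example (added, illustrative): with 4096-byte logical blocks,
 * round_down(UINT_MAX, 4096) == 4294963200, so the limit is
 * 4294963200 >> 9 == 8388600 sectors (just under 4 GiB).
 */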

/*
 * The maximum bio size that is aligned to q->limits.discard_granularity.
 * This is a hint for splitting large discard bios in the generic block
 * layer; if the device driver then needs to split a discard bio into
 * smaller ones, their bi_size will most likely already be aligned to the
 * discard_granularity of the device's queue.
 */
static inline unsigned int bio_aligned_discard_max_sectors(
					struct request_queue *q)
{
	return round_down(UINT_MAX, q->limits.discard_granularity) >>
			SECTOR_SHIFT;
}
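/*
 * Worked example (added, illustrative): with a 1 MiB discard_granularity,
 * round_down(UINT_MAX, 1 << 20) == 4293918720 bytes, i.e. 8386560 sectors.
 */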

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

void __blk_queue_bounce(struct request_queue *q, struct bio **bio);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
		__blk_queue_bounce(q, bio);
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
void blk_queue_clear_zone_settings(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
static inline void blk_queue_clear_zone_settings(struct request_queue *q) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

struct request_queue *blk_alloc_queue(int node_id);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

static inline void bio_clear_hipri(struct bio *bio)
{
	/* can't support alloc cache if we turn off polling */
	bio_clear_flag(bio, BIO_PERCPU_CACHE);
	bio->bi_opf &= ~REQ_HIPRI;
}

extern const struct address_space_operations def_blk_aops;

#endif /* BLK_INTERNAL_H */