]>
git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - block/blk-rq-qos.h
1 /* SPDX-License-Identifier: GPL-2.0 */
5 #include <linux/kernel.h>
6 #include <linux/blkdev.h>
7 #include <linux/blk_types.h>
8 #include <linux/atomic.h>
9 #include <linux/wait.h>
10 #include <linux/blk-mq.h>
12 #include "blk-mq-debugfs.h"
14 struct blk_mq_debugfs_attr
;
24 wait_queue_head_t wait
;
29 struct rq_qos_ops
*ops
;
30 struct request_queue
*q
;
33 #ifdef CONFIG_BLK_DEBUG_FS
34 struct dentry
*debugfs_dir
;
39 void (*throttle
)(struct rq_qos
*, struct bio
*);
40 void (*track
)(struct rq_qos
*, struct request
*, struct bio
*);
41 void (*merge
)(struct rq_qos
*, struct request
*, struct bio
*);
42 void (*issue
)(struct rq_qos
*, struct request
*);
43 void (*requeue
)(struct rq_qos
*, struct request
*);
44 void (*done
)(struct rq_qos
*, struct request
*);
45 void (*done_bio
)(struct rq_qos
*, struct bio
*);
46 void (*cleanup
)(struct rq_qos
*, struct bio
*);
47 void (*queue_depth_changed
)(struct rq_qos
*);
48 void (*exit
)(struct rq_qos
*);
49 const struct blk_mq_debugfs_attr
*debugfs_attrs
;
53 unsigned int max_depth
;
58 unsigned int queue_depth
;
59 unsigned int default_depth
;
62 static inline struct rq_qos
*rq_qos_id(struct request_queue
*q
,
66 for (rqos
= q
->rq_qos
; rqos
; rqos
= rqos
->next
) {
73 static inline struct rq_qos
*wbt_rq_qos(struct request_queue
*q
)
75 return rq_qos_id(q
, RQ_QOS_WBT
);
78 static inline struct rq_qos
*blkcg_rq_qos(struct request_queue
*q
)
80 return rq_qos_id(q
, RQ_QOS_LATENCY
);
83 static inline void rq_wait_init(struct rq_wait
*rq_wait
)
85 atomic_set(&rq_wait
->inflight
, 0);
86 init_waitqueue_head(&rq_wait
->wait
);
89 static inline int rq_qos_add(struct request_queue
*q
, struct rq_qos
*rqos
)
92 * No IO can be in-flight when adding rqos, so freeze queue, which
93 * is fine since we only support rq_qos for blk-mq queue.
95 * Reuse ->queue_lock for protecting against other concurrent
96 * rq_qos adding/deleting
98 blk_mq_freeze_queue(q
);
100 spin_lock_irq(&q
->queue_lock
);
101 if (rq_qos_id(q
, rqos
->id
))
103 rqos
->next
= q
->rq_qos
;
105 spin_unlock_irq(&q
->queue_lock
);
107 blk_mq_unfreeze_queue(q
);
109 if (rqos
->ops
->debugfs_attrs
)
110 blk_mq_debugfs_register_rqos(rqos
);
114 spin_unlock_irq(&q
->queue_lock
);
115 blk_mq_unfreeze_queue(q
);
120 static inline void rq_qos_del(struct request_queue
*q
, struct rq_qos
*rqos
)
125 * See comment in rq_qos_add() about freezing queue & using
128 blk_mq_freeze_queue(q
);
130 spin_lock_irq(&q
->queue_lock
);
131 for (cur
= &q
->rq_qos
; *cur
; cur
= &(*cur
)->next
) {
137 spin_unlock_irq(&q
->queue_lock
);
139 blk_mq_unfreeze_queue(q
);
141 blk_mq_debugfs_unregister_rqos(rqos
);
144 typedef bool (acquire_inflight_cb_t
)(struct rq_wait
*rqw
, void *private_data
);
145 typedef void (cleanup_cb_t
)(struct rq_wait
*rqw
, void *private_data
);
147 void rq_qos_wait(struct rq_wait
*rqw
, void *private_data
,
148 acquire_inflight_cb_t
*acquire_inflight_cb
,
149 cleanup_cb_t
*cleanup_cb
);
150 bool rq_wait_inc_below(struct rq_wait
*rq_wait
, unsigned int limit
);
151 bool rq_depth_scale_up(struct rq_depth
*rqd
);
152 bool rq_depth_scale_down(struct rq_depth
*rqd
, bool hard_throttle
);
153 bool rq_depth_calc_max_depth(struct rq_depth
*rqd
);
155 void __rq_qos_cleanup(struct rq_qos
*rqos
, struct bio
*bio
);
156 void __rq_qos_done(struct rq_qos
*rqos
, struct request
*rq
);
157 void __rq_qos_issue(struct rq_qos
*rqos
, struct request
*rq
);
158 void __rq_qos_requeue(struct rq_qos
*rqos
, struct request
*rq
);
159 void __rq_qos_throttle(struct rq_qos
*rqos
, struct bio
*bio
);
160 void __rq_qos_track(struct rq_qos
*rqos
, struct request
*rq
, struct bio
*bio
);
161 void __rq_qos_merge(struct rq_qos
*rqos
, struct request
*rq
, struct bio
*bio
);
162 void __rq_qos_done_bio(struct rq_qos
*rqos
, struct bio
*bio
);
163 void __rq_qos_queue_depth_changed(struct rq_qos
*rqos
);
165 static inline void rq_qos_cleanup(struct request_queue
*q
, struct bio
*bio
)
168 __rq_qos_cleanup(q
->rq_qos
, bio
);
171 static inline void rq_qos_done(struct request_queue
*q
, struct request
*rq
)
174 __rq_qos_done(q
->rq_qos
, rq
);
177 static inline void rq_qos_issue(struct request_queue
*q
, struct request
*rq
)
180 __rq_qos_issue(q
->rq_qos
, rq
);
183 static inline void rq_qos_requeue(struct request_queue
*q
, struct request
*rq
)
186 __rq_qos_requeue(q
->rq_qos
, rq
);
189 static inline void rq_qos_done_bio(struct bio
*bio
)
191 if (bio
->bi_bdev
&& (bio_flagged(bio
, BIO_QOS_THROTTLED
) ||
192 bio_flagged(bio
, BIO_QOS_MERGED
))) {
193 struct request_queue
*q
= bdev_get_queue(bio
->bi_bdev
);
195 __rq_qos_done_bio(q
->rq_qos
, bio
);
199 static inline void rq_qos_throttle(struct request_queue
*q
, struct bio
*bio
)
202 bio_set_flag(bio
, BIO_QOS_THROTTLED
);
203 __rq_qos_throttle(q
->rq_qos
, bio
);
207 static inline void rq_qos_track(struct request_queue
*q
, struct request
*rq
,
211 __rq_qos_track(q
->rq_qos
, rq
, bio
);
214 static inline void rq_qos_merge(struct request_queue
*q
, struct request
*rq
,
218 bio_set_flag(bio
, BIO_QOS_MERGED
);
219 __rq_qos_merge(q
->rq_qos
, rq
, bio
);
223 static inline void rq_qos_queue_depth_changed(struct request_queue
*q
)
226 __rq_qos_queue_depth_changed(q
->rq_qos
);
/* Tear down every rq_qos policy attached to the queue (out of line). */
void rq_qos_exit(struct request_queue *);