/* block/blk-rq-qos.h — block layer request QoS infrastructure */
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"
14 struct blk_mq_debugfs_attr
;
24 wait_queue_head_t wait
;
/*
 * Identifies which QoS policy a struct rq_qos implements.
 * NOTE(review): enumerator list reconstructed from the lookups below
 * (RQ_QOS_WBT in wbt_rq_qos(), RQ_QOS_LATENCY in blkcg_rq_qos()) and the
 * upstream v5.15 header — confirm against the original file.
 */
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
	RQ_QOS_IOPRIO,
};

/*
 * One QoS policy attached to a request queue.  Policies form a singly
 * linked list headed at q->rq_qos, chained through ->next (see
 * rq_qos_add()/rq_qos_del() below).
 */
struct rq_qos {
	struct rq_qos_ops *ops;		/* per-policy callbacks */
	struct request_queue *q;	/* owning queue */
	enum rq_qos_id id;		/* which policy this is */
	struct rq_qos *next;		/* next policy on q->rq_qos list */
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;	/* created when ops->debugfs_attrs set */
#endif
};
/*
 * Hooks a QoS policy can implement.  They are dispatched through the
 * __rq_qos_*() helpers declared below, which the inline rq_qos_*()
 * wrappers call only when q->rq_qos is non-NULL.
 */
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	/* when non-NULL, rq_qos_add() registers a debugfs dir for the policy */
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};
53 unsigned int max_depth
;
58 unsigned int queue_depth
;
59 unsigned int default_depth
;
62 static inline struct rq_qos
*rq_qos_id(struct request_queue
*q
,
66 for (rqos
= q
->rq_qos
; rqos
; rqos
= rqos
->next
) {
73 static inline struct rq_qos
*wbt_rq_qos(struct request_queue
*q
)
75 return rq_qos_id(q
, RQ_QOS_WBT
);
78 static inline struct rq_qos
*blkcg_rq_qos(struct request_queue
*q
)
80 return rq_qos_id(q
, RQ_QOS_LATENCY
);
83 static inline void rq_wait_init(struct rq_wait
*rq_wait
)
85 atomic_set(&rq_wait
->inflight
, 0);
86 init_waitqueue_head(&rq_wait
->wait
);
89 static inline void rq_qos_add(struct request_queue
*q
, struct rq_qos
*rqos
)
92 * No IO can be in-flight when adding rqos, so freeze queue, which
93 * is fine since we only support rq_qos for blk-mq queue.
95 * Reuse ->queue_lock for protecting against other concurrent
96 * rq_qos adding/deleting
98 blk_mq_freeze_queue(q
);
100 spin_lock_irq(&q
->queue_lock
);
101 rqos
->next
= q
->rq_qos
;
103 spin_unlock_irq(&q
->queue_lock
);
105 blk_mq_unfreeze_queue(q
);
107 if (rqos
->ops
->debugfs_attrs
)
108 blk_mq_debugfs_register_rqos(rqos
);
111 static inline void rq_qos_del(struct request_queue
*q
, struct rq_qos
*rqos
)
116 * See comment in rq_qos_add() about freezing queue & using
119 blk_mq_freeze_queue(q
);
121 spin_lock_irq(&q
->queue_lock
);
122 for (cur
= &q
->rq_qos
; *cur
; cur
= &(*cur
)->next
) {
128 spin_unlock_irq(&q
->queue_lock
);
130 blk_mq_unfreeze_queue(q
);
132 blk_mq_debugfs_unregister_rqos(rqos
);
135 typedef bool (acquire_inflight_cb_t
)(struct rq_wait
*rqw
, void *private_data
);
136 typedef void (cleanup_cb_t
)(struct rq_wait
*rqw
, void *private_data
);
138 void rq_qos_wait(struct rq_wait
*rqw
, void *private_data
,
139 acquire_inflight_cb_t
*acquire_inflight_cb
,
140 cleanup_cb_t
*cleanup_cb
);
141 bool rq_wait_inc_below(struct rq_wait
*rq_wait
, unsigned int limit
);
142 bool rq_depth_scale_up(struct rq_depth
*rqd
);
143 bool rq_depth_scale_down(struct rq_depth
*rqd
, bool hard_throttle
);
144 bool rq_depth_calc_max_depth(struct rq_depth
*rqd
);
/*
 * Out-of-line dispatchers (blk-rq-qos.c): walk the rq_qos list and invoke
 * the corresponding ops hook on each policy.  The inline rq_qos_*()
 * wrappers below gate these on q->rq_qos being non-NULL.
 */
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
156 static inline void rq_qos_cleanup(struct request_queue
*q
, struct bio
*bio
)
159 __rq_qos_cleanup(q
->rq_qos
, bio
);
162 static inline void rq_qos_done(struct request_queue
*q
, struct request
*rq
)
165 __rq_qos_done(q
->rq_qos
, rq
);
168 static inline void rq_qos_issue(struct request_queue
*q
, struct request
*rq
)
171 __rq_qos_issue(q
->rq_qos
, rq
);
174 static inline void rq_qos_requeue(struct request_queue
*q
, struct request
*rq
)
177 __rq_qos_requeue(q
->rq_qos
, rq
);
180 static inline void rq_qos_done_bio(struct request_queue
*q
, struct bio
*bio
)
183 __rq_qos_done_bio(q
->rq_qos
, bio
);
186 static inline void rq_qos_throttle(struct request_queue
*q
, struct bio
*bio
)
189 * BIO_TRACKED lets controllers know that a bio went through the
190 * normal rq_qos path.
192 bio_set_flag(bio
, BIO_TRACKED
);
194 __rq_qos_throttle(q
->rq_qos
, bio
);
197 static inline void rq_qos_track(struct request_queue
*q
, struct request
*rq
,
201 __rq_qos_track(q
->rq_qos
, rq
, bio
);
204 static inline void rq_qos_merge(struct request_queue
*q
, struct request
*rq
,
208 __rq_qos_merge(q
->rq_qos
, rq
, bio
);
211 static inline void rq_qos_queue_depth_changed(struct request_queue
*q
)
214 __rq_qos_queue_depth_changed(q
->rq_qos
);
/* Tear down every policy on the queue (calls each ops->exit). */
void rq_qos_exit(struct request_queue *);