/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_list;
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned short          index_hw[HCTX_MAX_TYPES];

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);
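
/*
 * Illustrative pairing (the actual callers live in blk-mq.c, not this header):
 * a tag map is created with blk_mq_alloc_rq_map() and then populated with
 * blk_mq_alloc_rqs(); teardown calls blk_mq_free_rqs() before
 * blk_mq_free_rq_map().
 */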

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);

/*
 * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
 * @q: request queue
 * @hctx_type: the hctx type index
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                          unsigned int hctx_type,
                                                          unsigned int cpu)
{
        struct blk_mq_tag_set *set = q->tag_set;

        return q->queue_hw_ctx[set->map[hctx_type].mq_map[cpu]];
}

/*
 * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
 * @q: request queue
 * @flags: request command flags
 * @cpu: CPU
 */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                     unsigned int flags,
                                                     unsigned int cpu)
{
        int hctx_type = 0;

        if (q->mq_ops->rq_flags_to_type)
                hctx_type = q->mq_ops->rq_flags_to_type(q, flags);

        return blk_mq_map_queue_type(q, hctx_type, cpu);
}
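
/*
 * Illustrative use (the actual call sites are in blk-mq.c, not this header):
 * once a submitter holds a software ctx, the hardware queue for a request is
 * looked up from its command flags and CPU, roughly:
 *
 *      data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx->cpu);
 *
 * with ->rq_flags_to_type() letting the driver steer requests (e.g. reads or
 * polled I/O) to different hardware queue types.
 */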

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
        return READ_ONCE(rq->state);
}

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                                  unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node as well, for
 * instance. For now this is hardcoded as-is. Note that we don't care about
 * preemption, since we know the ctx's are persistent. This does mean that we
 * can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
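
/*
 * Usage sketch (illustrative; the real callers are in blk-mq.c): because
 * blk_mq_get_ctx() pins the caller via get_cpu(), every call must be paired
 * with blk_mq_put_ctx(), and the code in between must not sleep:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      ... touch the per-cpu software queue state, no sleeping here ...
 *      blk_mq_put_ctx(ctx);
 */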

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue *q;
        blk_mq_req_flags_t flags;
        unsigned int shallow_depth;
        unsigned int cmd_flags;

        /* input & output parameter */
        struct blk_mq_ctx *ctx;
        struct blk_mq_hw_ctx *hctx;
};

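/*
 * Requests allocated on behalf of an I/O scheduler (BLK_MQ_REQ_INTERNAL) are
 * tagged from the hctx's scheduler tags; everything else draws from the
 * driver tag set.
 */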
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

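/* A hardware queue counts as "mapped" once it has software ctxs and a tag set. */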
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
                         unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(hctx);
        return true;
}
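
/*
 * Illustrative dispatch pattern (the real users live in blk-mq.c and
 * blk-mq-sched.c): a budget is taken before a request is handed to the
 * driver, and returned if the request does not get dispatched after all:
 *
 *      if (!blk_mq_get_dispatch_budget(hctx))
 *              break;
 *      if (!blk_mq_get_driver_tag(rq)) {
 *              blk_mq_put_dispatch_budget(hctx);
 *              break;
 *      }
 *      ... ->queue_rq() ...
 */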

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
                                              struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
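
/*
 * Note: both wrappers above are no-ops unless the request holds a scheduler
 * (internal) tag as well as a driver tag; releasing the driver tag also drops
 * the shared-tag accounting (RQF_MQ_INFLIGHT / hctx->nr_active) taken when
 * the tag was acquired.
 */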

/* Point every possible CPU at hardware queue 0, i.e. reset the map. */
static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
        int cpu;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = 0;
}

#endif