/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

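/*
 * Per-CPU software queue: requests are staged on rq_list before being
 * flushed to the hardware queue this ctx maps to; index_hw is the ctx's
 * index within that hardware queue's ctx array.
 */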
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

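/*
 * Queue and dispatch management internals, shared between blk-mq.c,
 * blk-mq-sched.c and the rest of the block layer core.
 */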
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);
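
/*
 * Both helpers bypass the I/O scheduler and try to push the request(s)
 * directly to the driver's ->queue_rq() hook.
 */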

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

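/*
 * Resolve the hardware queue a given CPU dispatches to, via the per-cpu
 * q->mq_map table. Illustrative use (as in the driver tag helpers below):
 *
 *	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */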
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

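/*
 * Invoked from blk-mq timeout handling once a request has been outstanding
 * longer than its deadline; calls into the driver's ->timeout() hook.
 */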
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
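
/*
 * blk_mq_get_ctx() disables preemption via get_cpu(), so every call must be
 * balanced by blk_mq_put_ctx(). A minimal sketch of the expected pattern:
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... stage work on ctx ...
 *	blk_mq_put_ctx(ctx);
 */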

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

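/*
 * Allocations flagged BLK_MQ_REQ_INTERNAL draw from the scheduler tag set
 * (sched_tags); everything else uses the driver tag set.
 */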
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

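/*
 * A hardware queue is only usable once at least one software queue maps to
 * it and its tags have been allocated.
 */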
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

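/*
 * Dispatch budget hooks let a driver (e.g. SCSI) bound how many requests are
 * dispatched at once. A successful blk_mq_get_dispatch_budget() must be
 * balanced by blk_mq_put_dispatch_budget() whenever the request is not
 * actually handed to the driver. Illustrative dispatch-path sketch:
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		return;
 *	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		return;
 *	}
 *	... issue rq via ->queue_rq() ...
 */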
static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

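/*
 * rq->tag == -1 means the request holds no driver tag; rq->internal_tag is
 * only valid when an I/O scheduler is attached. The helpers below release
 * the driver tag and drop the hctx's in-flight accounting for it.
 */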
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif