#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
                struct list_head        rq_list;
        } ____cacheline_aligned_in_smp;

        unsigned int            cpu;
        unsigned int            index_hw;

        /* incremented at dispatch time */
        unsigned long           rq_dispatched[2];
        unsigned long           rq_merged;

        /* incremented at completion time */
        unsigned long           ____cacheline_aligned_in_smp rq_completed[2];

        struct request_queue    *queue;
        struct kobject          kobj;
} ____cacheline_aligned_in_smp;
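
/*
 * Each ctx is a per-cpu software queue feeding one hardware context;
 * index_hw is the position of this ctx in hctx->ctxs[], as set up by
 * blk_mq_map_swqueue(). A rough invariant (an illustrative sketch,
 * simplified from the mapping code in blk-mq.c):
 *
 *      hctx = blk_mq_map_queue(q, ctx->cpu);
 *      hctx->ctxs[ctx->index_hw] == ctx;
 */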

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
                           bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
                                        struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
                                        unsigned int hctx_idx,
                                        unsigned int nr_tags,
                                        unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                     unsigned int hctx_idx, unsigned int depth);
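
/*
 * Typical pairing of these helpers, a sketch simplified from the callers
 * in blk-mq.c (error handling trimmed):
 *
 *      tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
 *                                 set->reserved_tags);
 *      if (!tags)
 *              return -ENOMEM;
 *      if (blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth) < 0) {
 *              blk_mq_free_rq_map(tags);
 *              return -ENOMEM;
 *      }
 *      ...
 *      blk_mq_free_rqs(set, tags, hctx_idx);   // free the requests first,
 *      blk_mq_free_rq_map(tags);               // then the tag map itself
 */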

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list);
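
/*
 * Of the three: __blk_mq_insert_request() and blk_mq_insert_requests()
 * add to the per-cpu ctx->rq_list, while blk_mq_request_bypass_insert()
 * puts the request straight on the hctx dispatch list, bypassing both
 * the software queue and any attached I/O scheduler.
 */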

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
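
/*
 * q->mq_map[] is indexed by CPU and holds hardware queue indexes; helpers
 * later in this header resolve a request's hctx the same way, e.g.
 *
 *      hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */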

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}
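
/*
 * get_cpu() in blk_mq_get_ctx() disables preemption, so callers must pair
 * the two calls and must not sleep in between; a sketch of the pattern
 * used by the callers in blk-mq.c:
 *
 *      struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *      ...                             // no sleeping here
 *      blk_mq_put_ctx(ctx);
 */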

struct blk_mq_alloc_data {
        /* input parameter */
        struct request_queue    *q;
        unsigned int            flags;
        unsigned int            shallow_depth;

        /* input & output parameter */
        struct blk_mq_ctx       *ctx;
        struct blk_mq_hw_ctx    *hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
        if (data->flags & BLK_MQ_REQ_INTERNAL)
                return data->hctx->sched_tags;

        return data->hctx->tags;
}
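
/*
 * BLK_MQ_REQ_INTERNAL marks allocations made on behalf of an I/O
 * scheduler: those draw from hctx->sched_tags, while requests going
 * straight to the driver draw from hctx->tags (the driver tag space).
 */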

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
        return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                      unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->put_budget)
                q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        if (q->mq_ops->get_budget)
                return q->mq_ops->get_budget(hctx);
        return true;
}
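
/*
 * A sketch of the dispatch-budget protocol as the dispatch paths in
 * blk-mq.c and blk-mq-sched.c use these helpers (SCSI implements
 * ->get_budget/->put_budget to cap in-flight commands per device):
 *
 *      if (!blk_mq_get_dispatch_budget(hctx))
 *              break;                  // out of budget, stop dispatching
 *      ...
 *      // if the request is never handed to the driver, the budget
 *      // must be given back:
 *      blk_mq_put_dispatch_budget(hctx);
 */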

static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
                                           struct request *rq)
{
        blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
        rq->tag = -1;

        if (rq->rq_flags & RQF_MQ_INFLIGHT) {
                rq->rq_flags &= ~RQF_MQ_INFLIGHT;
                atomic_dec(&hctx->nr_active);
        }
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
                                              struct request *rq)
{
        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        __blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx;

        if (rq->tag == -1 || rq->internal_tag == -1)
                return;

        hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
        __blk_mq_put_driver_tag(hctx, rq);
}
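
/*
 * rq->tag is the driver tag (an index into hctx->tags) and
 * rq->internal_tag the scheduler tag (hctx->sched_tags); -1 means
 * unassigned. The early returns above make these helpers act only when
 * a scheduler is in use and a driver tag is actually held.
 */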

#endif