/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

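/*
 * A blk_mq_ctx is the per-CPU software queue on which requests are staged
 * before being dispatched to a hardware queue (see the CPU -> queue
 * mapping helpers below).
 */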
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
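
/*
 * A minimal sketch of the locking rule encoded above: ctx->rq_list is
 * only manipulated under ctx->lock, e.g.
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 */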

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
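
/*
 * Dequeue a request from one of the software queues mapped to @hctx,
 * starting the scan from @start (a best-effort description; see the
 * definition in blk-mq.c).
 */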
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

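/*
 * q->mq_map[] holds, for each possible CPU, the index of the hardware
 * queue that CPU dispatches to, so the mapping below is a plain array
 * lookup.
 */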
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-CPU software queues; they could just as well be
 * per-node, for instance, but for now this is hardcoded. Note that we
 * don't care about preemption, since we know the ctxs are persistent.
 * This does mean that we can't rely on the ctx always matching the
 * currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
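
/*
 * Sketch of the intended pairing: blk_mq_get_ctx() pins the caller to the
 * current CPU via get_cpu(), so every successful call must be matched by
 * blk_mq_put_ctx(), e.g.
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *	... stage work on ctx ...
 *	blk_mq_put_ctx(ctx);
 */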

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

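/*
 * Requests allocated on behalf of an I/O scheduler carry
 * BLK_MQ_REQ_INTERNAL and draw from the scheduler tag space; everything
 * else uses the driver tags directly.
 */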
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

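/*
 * In-flight accounting for diskstats: both helpers fill a two-slot
 * inflight[] array, with the _rw variant splitting the count into reads
 * and writes.
 */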
void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

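/*
 * The dispatch budget hooks are optional: a driver that does not need to
 * throttle dispatch leaves them unset, in which case getting a budget
 * always succeeds.
 */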
static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

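/*
 * Releasing a driver tag also unwinds the shared-tag accounting: a request
 * flagged RQF_MQ_INFLIGHT was counted in hctx->nr_active when its tag was
 * acquired, so drop that count here.
 */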
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif